From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1753885AbbFRXBY (ORCPT ); Thu, 18 Jun 2015 19:01:24 -0400
Received: from terminus.zytor.com ([198.137.202.10]:38499 "EHLO
	terminus.zytor.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1751232AbbFRXBU (ORCPT );
	Thu, 18 Jun 2015 19:01:20 -0400
Date: Thu, 18 Jun 2015 16:00:51 -0700
From: tip-bot for Peter Zijlstra
Message-ID: 
Cc: tglx@linutronix.de, linux-kernel@vger.kernel.org, hpa@zytor.com,
	mingo@kernel.org, peterz@infradead.org
Reply-To: mingo@kernel.org, hpa@zytor.com, tglx@linutronix.de,
	linux-kernel@vger.kernel.org, peterz@infradead.org
In-Reply-To: <20150611124742.532642391@infradead.org>
References: <20150611124742.532642391@infradead.org>
To: linux-tip-commits@vger.kernel.org
Subject: [tip:sched/hrtimers] sched: Replace normalize_task() with __sched_setscheduler()
Git-Commit-ID: dbc7f069b93a249340e974d6e8f55656280d8701
X-Mailer: tip-git-log-daemon
Robot-ID: 
Robot-Unsubscribe: Contact to get blacklisted from these emails
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain; charset=UTF-8
Content-Disposition: inline
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org

Commit-ID:  dbc7f069b93a249340e974d6e8f55656280d8701
Gitweb:     http://git.kernel.org/tip/dbc7f069b93a249340e974d6e8f55656280d8701
Author:     Peter Zijlstra
AuthorDate: Thu, 11 Jun 2015 14:46:38 +0200
Committer:  Thomas Gleixner
CommitDate: Fri, 19 Jun 2015 00:25:26 +0200

sched: Replace normalize_task() with __sched_setscheduler()

Reduce duplicate logic; normalize_task() is a simplified version of
__sched_setscheduler(). Parametrize the difference and collapse the
two. This reduces the number of check_class_changed() call sites.

Signed-off-by: Peter Zijlstra (Intel)
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.532642391@infradead.org
Signed-off-by: Thomas Gleixner
---
 kernel/sched/core.c | 65 +++++++++++++++++++----------------------------------
 1 file changed, 23 insertions(+), 42 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fa32bc0..b610ef9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3438,7 +3438,7 @@ static bool dl_param_changed(struct task_struct *p,
 
 static int __sched_setscheduler(struct task_struct *p,
 				const struct sched_attr *attr,
-				bool user)
+				bool user, bool pi)
 {
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
@@ -3624,18 +3624,20 @@ change:
 	p->sched_reset_on_fork = reset_on_fork;
 	oldprio = p->prio;
 
-	/*
-	 * Take priority boosted tasks into account. If the new
-	 * effective priority is unchanged, we just store the new
-	 * normal parameters and do not touch the scheduler class and
-	 * the runqueue. This will be done when the task deboost
-	 * itself.
-	 */
-	new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
-	if (new_effective_prio == oldprio) {
-		__setscheduler_params(p, attr);
-		task_rq_unlock(rq, p, &flags);
-		return 0;
+	if (pi) {
+		/*
+		 * Take priority boosted tasks into account. If the new
+		 * effective priority is unchanged, we just store the new
+		 * normal parameters and do not touch the scheduler class and
+		 * the runqueue. This will be done when the task deboost
+		 * itself.
+		 */
+		new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+		if (new_effective_prio == oldprio) {
+			__setscheduler_params(p, attr);
+			task_rq_unlock(rq, p, &flags);
+			return 0;
+		}
 	}
 
 	queued = task_on_rq_queued(p);
@@ -3646,7 +3648,7 @@ change:
 		put_prev_task(rq, p);
 
 	prev_class = p->sched_class;
-	__setscheduler(rq, p, attr, true);
+	__setscheduler(rq, p, attr, pi);
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
@@ -3661,7 +3663,8 @@ change:
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
 
-	rt_mutex_adjust_pi(p);
+	if (pi)
+		rt_mutex_adjust_pi(p);
 
 	return 0;
 }
@@ -3682,7 +3685,7 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
 		attr.sched_policy = policy;
 	}
 
-	return __sched_setscheduler(p, &attr, check);
+	return __sched_setscheduler(p, &attr, check, true);
 }
 /**
  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
@@ -3703,7 +3706,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
-	return __sched_setscheduler(p, attr, true);
+	return __sched_setscheduler(p, attr, true, true);
 }
 EXPORT_SYMBOL_GPL(sched_setattr);
 
@@ -7361,32 +7364,12 @@ EXPORT_SYMBOL(___might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void normalize_task(struct rq *rq, struct task_struct *p)
+void normalize_rt_tasks(void)
 {
-	const struct sched_class *prev_class = p->sched_class;
+	struct task_struct *g, *p;
 	struct sched_attr attr = {
 		.sched_policy = SCHED_NORMAL,
 	};
-	int old_prio = p->prio;
-	int queued;
-
-	queued = task_on_rq_queued(p);
-	if (queued)
-		dequeue_task(rq, p, 0);
-	__setscheduler(rq, p, &attr, false);
-	if (queued) {
-		enqueue_task(rq, p, 0);
-		resched_curr(rq);
-	}
-
-	check_class_changed(rq, p, prev_class, old_prio);
-}
-
-void normalize_rt_tasks(void)
-{
-	struct task_struct *g, *p;
-	unsigned long flags;
-	struct rq *rq;
 
 	read_lock(&tasklist_lock);
 	for_each_process_thread(g, p) {
@@ -7413,9 +7396,7 @@ void normalize_rt_tasks(void)
 			continue;
 		}
 
-		rq = task_rq_lock(p, &flags);
-		normalize_task(rq, p);
-		task_rq_unlock(rq, p, &flags);
+		__sched_setscheduler(p, &attr, false, false);
 	}
 	read_unlock(&tasklist_lock);
 }
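
The shape of the refactoring ("parametrize the difference and collapse")
can be shown with a minimal standalone C sketch. This is not kernel code:
struct item, set_value() and notify_observers() are hypothetical stand-ins
for __sched_setscheduler() and the rt_mutex priority-inheritance work that
the new 'pi' parameter guards.

	#include <stdbool.h>
	#include <stdio.h>

	struct item {
		int value;
		int notifications;	/* how often observers were notified */
	};

	static void notify_observers(struct item *it)
	{
		it->notifications++;
		printf("observer notified, value=%d\n", it->value);
	}

	/*
	 * One function with a 'notify' flag replaces a hypothetical pair
	 * of near-duplicates (a full set_value() plus a copy-pasted
	 * "quiet" variant), analogous to __sched_setscheduler() gaining
	 * 'bool pi' so that normalize_rt_tasks() can reuse it.
	 */
	static void set_value(struct item *it, int value, bool notify)
	{
		it->value = value;
		if (notify)
			notify_observers(it);
	}

	int main(void)
	{
		struct item it = { .value = 0, .notifications = 0 };

		set_value(&it, 1, true);	/* normal path: side effects run */
		set_value(&it, 2, false);	/* formerly the duplicate "quiet" path */

		printf("value=%d notifications=%d\n", it.value, it.notifications);
		return 0;
	}

In the patch itself the flag guards the priority-inheritance handling:
sched_setscheduler() and sched_setattr() pass pi == true, while the SysRq
path normalize_rt_tasks() passes pi == false and therefore skips
rt_mutex_get_effective_prio() and rt_mutex_adjust_pi(), which is what let
the separate normalize_task() helper be deleted.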