From: tip-bot for Peter Zijlstra <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: hpa@zytor.com, mingo@kernel.org, peterz@infradead.org,
	linux-kernel@vger.kernel.org, tglx@linutronix.de
Subject: [tip:sched/hrtimers] sched: Streamline the task migration locking a little
Date: Thu, 18 Jun 2015 16:03:15 -0700	[thread overview]
Message-ID: <tip-5e16bbc2fb4053755705da5dd3557bbc0e5ccef6@git.kernel.org> (raw)
In-Reply-To: <20150611124743.755256708@infradead.org>

Commit-ID:  5e16bbc2fb4053755705da5dd3557bbc0e5ccef6
Gitweb:     http://git.kernel.org/tip/5e16bbc2fb4053755705da5dd3557bbc0e5ccef6
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 11 Jun 2015 14:46:51 +0200
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Fri, 19 Jun 2015 00:25:27 +0200

sched: Streamline the task migration locking a little

The whole migrate_task{,s}() locking seems a little shaky, there's a
lot of dropping and reacquiring happening. Pull the locking up into
the callers as far as possible to streamline the lot.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124743.755256708@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/sched/core.c | 76 ++++++++++++++++++++++++-----------------------------
 1 file changed, 34 insertions(+), 42 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 26637c9..1ddc129 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1065,10 +1065,8 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
  *
  * Returns (locked) new rq. Old rq's lock is released.
  */
-static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
 {
-	struct rq *rq = task_rq(p);
-
 	lockdep_assert_held(&rq->lock);
 
 	dequeue_task(rq, p, 0);
@@ -1100,41 +1098,19 @@ struct migration_arg {
  *
  * So we race with normal scheduler movements, but that's OK, as long
  * as the task is no longer on this CPU.
- *
- * Returns non-zero if task was successfully migrated.
  */
-static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 {
-	struct rq *rq;
-	int ret = 0;
-
 	if (unlikely(!cpu_active(dest_cpu)))
-		return ret;
-
-	rq = cpu_rq(src_cpu);
-
-	raw_spin_lock(&p->pi_lock);
-	raw_spin_lock(&rq->lock);
-	/* Already moved. */
-	if (task_cpu(p) != src_cpu)
-		goto done;
+		return rq;
 
 	/* Affinity changed (again). */
 	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
-		goto fail;
+		return rq;
 
-	/*
-	 * If we're not on a rq, the next wake-up will ensure we're
-	 * placed properly.
-	 */
-	if (task_on_rq_queued(p))
-		rq = move_queued_task(p, dest_cpu);
-done:
-	ret = 1;
-fail:
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock(&p->pi_lock);
-	return ret;
+	rq = move_queued_task(rq, p, dest_cpu);
+
+	return rq;
 }
 
 /*
@@ -1145,6 +1121,8 @@ fail:
 static int migration_cpu_stop(void *data)
 {
 	struct migration_arg *arg = data;
+	struct task_struct *p = arg->task;
+	struct rq *rq = this_rq();
 
 	/*
 	 * The original target cpu might have gone down and we might
@@ -1157,7 +1135,19 @@ static int migration_cpu_stop(void *data)
 	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
 	 */
 	sched_ttwu_pending();
-	__migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
+
+	raw_spin_lock(&p->pi_lock);
+	raw_spin_lock(&rq->lock);
+	/*
+	 * If task_rq(p) != rq, it cannot be migrated here, because we're
+	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
+	 * we're holding p->pi_lock.
+	 */
+	if (task_rq(p) == rq && task_on_rq_queued(p))
+		rq = __migrate_task(rq, p, arg->dest_cpu);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock(&p->pi_lock);
+
 	local_irq_enable();
 	return 0;
 }
@@ -1212,7 +1202,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		tlb_migrate_finish(p->mm);
 		return 0;
 	} else if (task_on_rq_queued(p))
-		rq = move_queued_task(p, dest_cpu);
+		rq = move_queued_task(rq, p, dest_cpu);
 out:
 	task_rq_unlock(rq, p, &flags);
 
@@ -5043,9 +5033,9 @@ static struct task_struct fake_task = {
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(unsigned int dead_cpu)
+static void migrate_tasks(struct rq *dead_rq)
 {
-	struct rq *rq = cpu_rq(dead_cpu);
+	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
 	int dest_cpu;
 
@@ -5067,7 +5057,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	update_rq_clock(rq);
 
-	for ( ; ; ) {
+	for (;;) {
 		/*
 		 * There's this thread running, bail when that's the only
 		 * remaining thread.
@@ -5080,12 +5070,14 @@ static void migrate_tasks(unsigned int dead_cpu)
 		next->sched_class->put_prev_task(rq, next);
 
 		/* Find suitable destination for @next, with force if needed. */
-		dest_cpu = select_fallback_rq(dead_cpu, next);
-		raw_spin_unlock(&rq->lock);
-
-		__migrate_task(next, dead_cpu, dest_cpu);
+		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
-		raw_spin_lock(&rq->lock);
+		rq = __migrate_task(rq, next, dest_cpu);
+		if (rq != dead_rq) {
+			raw_spin_unlock(&rq->lock);
+			rq = dead_rq;
+			raw_spin_lock(&rq->lock);
+		}
 	}
 
 	rq->stop = stop;
@@ -5337,7 +5329,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
-		migrate_tasks(cpu);
+		migrate_tasks(rq);
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 		break;

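For readers skimming the diff, the new convention can be summarized like this:
the caller takes p->pi_lock and rq->lock, the migrate helper hands back the rq
whose lock it now holds, and the caller relocks the original runqueue if the
helper switched locks, exactly as migrate_tasks() does above. Below is a
minimal user-space sketch of that pattern; pthread mutexes stand in for the
raw spinlocks, and every name in it (migrate_one, runqueues, struct task) is a
hypothetical stand-in for illustration, not the scheduler API.

/*
 * Stand-alone illustration (not kernel code): pthread mutexes stand in
 * for p->pi_lock and rq->lock, and migrate_one()/runqueues are made-up
 * names.  It mirrors the convention in the patch: the caller holds both
 * locks, the helper returns the rq whose lock is now held, and the
 * caller relocks the original runqueue when the helper switched locks.
 */
#include <pthread.h>
#include <stdio.h>

struct rq { pthread_mutex_t lock; int cpu; };
struct task { pthread_mutex_t pi_lock; struct rq *rq; };

static struct rq runqueues[2] = {
	{ PTHREAD_MUTEX_INITIALIZER, 0 },
	{ PTHREAD_MUTEX_INITIALIZER, 1 },
};

/* Caller holds p->pi_lock and rq->lock; returns the rq whose lock is held. */
static struct rq *migrate_one(struct rq *rq, struct task *p, int dest_cpu)
{
	struct rq *dest = &runqueues[dest_cpu];

	if (dest == rq)				/* nothing to move, keep current lock */
		return rq;

	p->rq = dest;				/* "move" the task */
	pthread_mutex_unlock(&rq->lock);	/* hand over: drop the old lock ... */
	pthread_mutex_lock(&dest->lock);	/* ... and take the new one */
	return dest;				/* caller now owns dest->lock */
}

int main(void)
{
	static struct task p = { PTHREAD_MUTEX_INITIALIZER, &runqueues[0] };
	struct rq *dead_rq = p.rq, *rq = dead_rq;

	pthread_mutex_lock(&p.pi_lock);
	pthread_mutex_lock(&rq->lock);

	rq = migrate_one(rq, &p, 1);
	if (rq != dead_rq) {			/* same relock dance as migrate_tasks() */
		pthread_mutex_unlock(&rq->lock);
		rq = dead_rq;
		pthread_mutex_lock(&rq->lock);
	}

	pthread_mutex_unlock(&rq->lock);
	pthread_mutex_unlock(&p.pi_lock);

	printf("task now on cpu %d\n", p.rq->cpu);
	return 0;
}
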
Thread overview: 58+ messages
2015-06-11 12:46 [PATCH 00/18] sched: balance callbacks v4 Peter Zijlstra
2015-06-11 12:46 ` [PATCH 01/18] sched: Replace post_schedule with a balance callback list Peter Zijlstra
2015-06-11 15:32   ` Kirill Tkhai
2015-06-18 23:00   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 02/18] sched: Use replace normalize_task() with __sched_setscheduler() Peter Zijlstra
2015-06-18 23:00   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 03/18] sched: Allow balance callbacks for check_class_changed() Peter Zijlstra
2015-06-18 23:01   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 04/18] sched,rt: Remove return value from pull_rt_task() Peter Zijlstra
2015-06-18 23:01   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 05/18] sched,rt: Convert switched_{from,to}_rt() / prio_changed_rt() to balance callbacks Peter Zijlstra
2015-06-18 23:01   ` [tip:sched/hrtimers] sched, rt: Convert switched_{from, to}_rt() " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 06/18] sched,dl: Remove return value from pull_dl_task() Peter Zijlstra
2015-06-18 23:02   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 07/18] sched,dl: Convert switched_{from,to}_dl() / prio_changed_dl() to balance callbacks Peter Zijlstra
2015-06-18 23:02   ` [tip:sched/hrtimers] sched, dl: Convert switched_{from, to}_dl() " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 08/18] hrtimer: Remove HRTIMER_STATE_MIGRATE Peter Zijlstra
2015-06-18 22:18   ` [tip:timers/core] " tip-bot for Oleg Nesterov
2015-06-11 12:46 ` [PATCH 09/18] hrtimer: Fix hrtimer_is_queued() hole Peter Zijlstra
2015-06-18 22:18   ` [tip:timers/core] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 10/18] seqcount: Rename write_seqcount_barrier() Peter Zijlstra
2015-06-18 22:19   ` [tip:timers/core] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 11/18] seqcount: Introduce raw_write_seqcount_barrier() Peter Zijlstra
2015-06-11 15:33   ` Paul E. McKenney
2015-06-11 21:45     ` Paul E. McKenney
2015-06-12  7:08       ` Peter Zijlstra
2015-06-12 18:59       ` Oleg Nesterov
2015-06-17 12:29       ` Peter Zijlstra
2015-06-17 14:57         ` Paul E. McKenney
2015-06-17 15:11           ` Peter Zijlstra
2015-06-17 15:42             ` Paul E. McKenney
2015-06-17 16:58               ` Peter Zijlstra
2015-06-17 15:49           ` Peter Zijlstra
2015-06-17 16:37             ` Paul E. McKenney
2015-06-17 17:11               ` Peter Zijlstra
2015-06-17 18:02                 ` Paul E. McKenney
2015-06-18  9:15                   ` Peter Zijlstra
2015-06-18  9:40                     ` Ingo Molnar
2015-06-18 10:40                       ` Peter Zijlstra
2015-06-18 16:54                         ` Paul E. McKenney
2015-06-18 17:10                           ` Steven Rostedt
2015-06-18 17:51                             ` Paul E. McKenney
2015-06-18 22:19         ` [tip:timers/core] seqcount: Introduce raw_write_seqcount_barrier() tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 12/18] hrtimer: Allow hrtimer::function() to free the timer Peter Zijlstra
2015-06-18 22:19   ` [tip:timers/core] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 13/18] sched,dl: Fix sched class hopping CBS hole Peter Zijlstra
2015-06-18 23:02   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 14/18] sched: Move code around Peter Zijlstra
2015-06-18 23:02   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 15/18] sched: Streamline the task migration locking a little Peter Zijlstra
2015-06-18 23:03   ` tip-bot for Peter Zijlstra [this message]
2015-06-11 12:46 ` [PATCH 16/18] lockdep: Simplify lock_release() Peter Zijlstra
2015-06-18 23:03   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 17/18] lockdep: Implement lock pinning Peter Zijlstra
2015-06-18 23:03   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-06-11 12:46 ` [PATCH 18/18] sched,lockdep: Employ " Peter Zijlstra
2015-06-18 23:04   ` [tip:sched/hrtimers] " tip-bot for Peter Zijlstra
2015-12-29  5:41 ` [PATCH 00/18] sched: balance callbacks v4 Byungchul Park
