sched: Make migration_call() safe for stop_machine()-free hotplug

The CPU_DYING branch of migration_call() relies on the fact that
CPU-hotplug offline operations use stop_machine(): while the outgoing
CPU runs its CPU_DYING notifiers, every other CPU is spinning in
stop_machine(), so nothing can concurrently access the runqueues being
manipulated.  Once hotplug no longer uses stop_machine(), that implicit
mutual exclusion disappears.  This commit therefore prepares for
stop_machine()-free hotplug by explicitly acquiring the relevant
runqueue locks, both the outgoing CPU's and that of the destination
runqueue receiving its tasks and counters, via double_rq_lock().
Because double_rq_lock() expects its caller to have disabled interrupts,
the old raw_spin_lock_irqsave() is split into local_irq_save() followed
by double_rq_lock().

Note that sched_ttwu_pending() remains outside the scope of these new
runqueue-lock critical sections because (1) it does its own
runqueue-lock acquisition and (2) it handles only already-pending
wakeups, and no further wakeups can select this CPU because it is
already marked as offline.
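
For readers unfamiliar with double_rq_lock(): it avoids ABBA deadlock
by acquiring the two runqueue locks in a fixed (address-based) order,
taking only one lock when both arguments name the same runqueue.  The
following stand-alone user-space sketch (the toy_* names and pthread
locks are illustrative stand-ins, not the kernel implementation) shows
that discipline together with the counter hand-off that
migrate_nr_uninterruptible() performs:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct rq and its lock. */
struct toy_rq {
	pthread_mutex_t lock;
	long nr_uninterruptible;
};

/* Lock both queues in address order so concurrent callers cannot deadlock. */
static void toy_double_lock(struct toy_rq *rq1, struct toy_rq *rq2)
{
	if (rq1 == rq2) {
		pthread_mutex_lock(&rq1->lock);	/* same queue: one lock suffices */
	} else if ((uintptr_t)rq1 < (uintptr_t)rq2) {
		pthread_mutex_lock(&rq1->lock);	/* lower address first */
		pthread_mutex_lock(&rq2->lock);
	} else {
		pthread_mutex_lock(&rq2->lock);
		pthread_mutex_lock(&rq1->lock);
	}
}

static void toy_double_unlock(struct toy_rq *rq1, struct toy_rq *rq2)
{
	pthread_mutex_unlock(&rq1->lock);
	if (rq1 != rq2)
		pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
	static struct toy_rq src  = { PTHREAD_MUTEX_INITIALIZER, 3 };
	static struct toy_rq dest = { PTHREAD_MUTEX_INITIALIZER, 1 };

	/*
	 * Analogue of migrate_nr_uninterruptible(): move the counter
	 * to another queue so the global sum stays constant.
	 */
	toy_double_lock(&src, &dest);
	dest.nr_uninterruptible += src.nr_uninterruptible;
	src.nr_uninterruptible = 0;
	toy_double_unlock(&src, &dest);

	printf("dest now holds %ld\n", dest.nr_uninterruptible);	/* 4 */
	return 0;
}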

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eaead2d..2e7797a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5175,10 +5175,8 @@
  * their home CPUs. So we just add the counter to another CPU's counter,
  * to keep the global sum constant after CPU-down:
  */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
+static void migrate_nr_uninterruptible(struct rq *rq_src, struct rq *rq_dest)
 {
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
 	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
 	rq_src->nr_uninterruptible = 0;
 }
@@ -5200,7 +5198,7 @@
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(unsigned int dead_cpu)
+static void migrate_tasks(unsigned int dead_cpu, struct rq *rq_dest)
 {
 	struct rq *rq = cpu_rq(dead_cpu);
 	struct task_struct *next, *stop = rq->stop;
@@ -5234,11 +5232,11 @@
 
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_cpu, next);
-		raw_spin_unlock(&rq->lock);
+		double_rq_unlock(rq, rq_dest);
 
 		__migrate_task(next, dead_cpu, dest_cpu);
 
-		raw_spin_lock(&rq->lock);
+		double_rq_lock(rq, rq_dest);
 	}
 
 	rq->stop = stop;
@@ -5452,6 +5450,7 @@
 	int cpu = (long)hcpu;
 	unsigned long flags;
 	struct rq *rq = cpu_rq(cpu);
+	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 
@@ -5474,17 +5473,19 @@
 	case CPU_DYING:
 		sched_ttwu_pending();
 		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
+		local_irq_save(flags);
+		double_rq_lock(rq, rq_dest);
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 			set_rq_offline(rq);
 		}
-		migrate_tasks(cpu);
+		migrate_tasks(cpu, rq_dest);
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-		migrate_nr_uninterruptible(rq);
+		migrate_nr_uninterruptible(rq, rq_dest);
 		calc_global_load_remove(rq);
+		double_rq_unlock(rq, rq_dest);
+		local_irq_restore(flags);
 		break;
 #endif
 	}
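
One more pattern worth noting from the migrate_tasks() hunk:
__migrate_task() performs its own lock acquisition, so both runqueue
locks are dropped around the call and reacquired before the loop
re-reads the queue state.  Continuing the toy sketch from the commit
text above (toy_* definitions as before; again an illustrative sketch,
not the kernel code):

/*
 * Move one unit from src to dest, taking the locks itself,
 * in the manner of __migrate_task().
 */
static void toy_move_one(struct toy_rq *src, struct toy_rq *dest)
{
	toy_double_lock(src, dest);
	if (src->nr_uninterruptible > 0) {
		src->nr_uninterruptible--;
		dest->nr_uninterruptible++;
	}
	toy_double_unlock(src, dest);
}

/*
 * Drain src in the manner of migrate_tasks(): drop both locks
 * around the self-locking callee, then reacquire and re-check,
 * since the queues may have changed while unlocked.
 */
static void toy_drain(struct toy_rq *src, struct toy_rq *dest)
{
	toy_double_lock(src, dest);
	while (src->nr_uninterruptible > 0) {
		toy_double_unlock(src, dest);
		toy_move_one(src, dest);
		toy_double_lock(src, dest);
	}
	toy_double_unlock(src, dest);
}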