        int push_cpu;
        /* cpu of this runqueue: */
        int cpu;
+       int online;
 
        struct task_struct *migration_thread;
        struct list_head migration_queue;
 #endif
 
 #define sched_class_highest (&rt_sched_class)
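+/* Walk all scheduling classes, from the highest-priority class down the ->next chain. */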
+#define for_each_class(class) \
+   for (class = sched_class_highest; class; class = class->next)
 
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
 }
 #endif
 
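+/* Mark the runqueue online in its root domain and invoke each class's rq_online hook. */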
+static void set_rq_online(struct rq *rq)
+{
+       if (!rq->online) {
+               const struct sched_class *class;
+
+               cpu_set(rq->cpu, rq->rd->online);
+               rq->online = 1;
+
+               for_each_class(class) {
+                       if (class->rq_online)
+                               class->rq_online(rq);
+               }
+       }
+}
+
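+/* Invoke each class's rq_offline hook, then drop the runqueue from its root domain's online mask. */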
+static void set_rq_offline(struct rq *rq)
+{
+       if (rq->online) {
+               const struct sched_class *class;
+
+               for_each_class(class) {
+                       if (class->rq_offline)
+                               class->rq_offline(rq);
+               }
+
+               cpu_clear(rq->cpu, rq->rd->online);
+               rq->online = 0;
+       }
+}
+
 /*
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
                spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpu_isset(cpu, rq->rd->span));
-                       cpu_set(cpu, rq->rd->online);
+
+                       set_rq_online(rq);
                }
                spin_unlock_irqrestore(&rq->lock, flags);
                break;
                spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpu_isset(cpu, rq->rd->span));
-                       cpu_clear(cpu, rq->rd->online);
+                       set_rq_offline(rq);
                }
                spin_unlock_irqrestore(&rq->lock, flags);
                break;
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
        unsigned long flags;
-       const struct sched_class *class;
 
        spin_lock_irqsave(&rq->lock, flags);
 
        if (rq->rd) {
                struct root_domain *old_rd = rq->rd;
 
-               for (class = sched_class_highest; class; class = class->next) {
-                       if (class->leave_domain)
-                               class->leave_domain(rq);
-               }
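+               /* Take the rq offline in the old root domain before detaching from it. */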
+               if (cpu_isset(rq->cpu, old_rd->online))
+                       set_rq_offline(rq);
 
                cpu_clear(rq->cpu, old_rd->span);
-               cpu_clear(rq->cpu, old_rd->online);
 
                if (atomic_dec_and_test(&old_rd->refcount))
                        kfree(old_rd);
 
        cpu_set(rq->cpu, rd->span);
        if (cpu_isset(rq->cpu, cpu_online_map))
-               cpu_set(rq->cpu, rd->online);
-
-       for (class = sched_class_highest; class; class = class->next) {
-               if (class->join_domain)
-                       class->join_domain(rq);
-       }
+               set_rq_online(rq);
 
        spin_unlock_irqrestore(&rq->lock, flags);
 }
                rq->next_balance = jiffies;
                rq->push_cpu = 0;
                rq->cpu = i;
+               rq->online = 0;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
                rq_attach_root(rq, &def_root_domain);
 
 
 static inline void rt_set_overload(struct rq *rq)
 {
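+       /* Offline runqueues do not participate in root-domain overload tracking. */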
+       if (!rq->online)
+               return;
+
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
 
 static inline void rt_clear_overload(struct rq *rq)
 {
+       if (!rq->online)
+               return;
+
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
        if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rt_rq->highest_prio = rt_se_prio(rt_se);
-               cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se));
+
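+               /* Report the new highest prio to cpupri only while this rq is online. */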
+               if (rq->online)
+                       cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                  rt_se_prio(rt_se));
        }
 #endif
 #ifdef CONFIG_SMP
 
        if (rt_rq->highest_prio != highest_prio) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
-               cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
+
+               if (rq->online)
+                       cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                  rt_rq->highest_prio);
        }
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 }
 
 /* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq)
 {
        if (rq->rt.overloaded)
                rt_set_overload(rq);
 }
 
 /* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq)
 {
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
-       .join_domain            = join_domain_rt,
-       .leave_domain           = leave_domain_rt,
+       .rq_online              = rq_online_rt,
+       .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,