 static inline void rt_set_overload(struct rq *rq)
 {
+	if (!rq->online)
+		return;
+
 	cpu_set(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
 	 * the overload count. That is checked to determine
 	 * if we should look at the mask. It would be a shame
 	 * if we looked at the mask, but the mask was not
 	 * updated yet.
 	 */
 	atomic_inc(&rq->rd->rto_count);
 }

 static inline void rt_clear_overload(struct rq *rq)
 {
+	if (!rq->online)
+		return;
+
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
 	cpu_clear(rq->cpu, rq->rd->rto_mask);
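The early return added to both helpers keeps an offline runqueue from touching its root domain's overload bookkeeping: during hotplug an rq can still carry overloaded state while it is detached from the root domain, and updating rto_mask/rto_count for such a CPU would let the push/pull logic pick a CPU that is going away. A minimal, self-contained userspace model of that guard (plain-C stand-ins only: a bitmask in place of cpumask_t, C11 atomics in place of the kernel's atomic_t; none of this is the kernel code itself):

#include <stdatomic.h>
#include <stdio.h>

struct root_domain {
	atomic_int rto_count;		/* number of RT-overloaded CPUs */
	unsigned long rto_mask;		/* bitmask stand-in for cpumask_t */
};

struct rq {
	int cpu;
	int online;			/* attached to an active root domain? */
	struct root_domain *rd;
};

static void rt_set_overload(struct rq *rq)
{
	if (!rq->online)		/* offline rq: leave the root domain alone */
		return;

	rq->rd->rto_mask |= 1UL << rq->cpu;
	atomic_fetch_add(&rq->rd->rto_count, 1);
}

int main(void)
{
	struct root_domain rd = { .rto_count = 0, .rto_mask = 0 };
	struct rq rq = { .cpu = 1, .online = 0, .rd = &rd };

	rt_set_overload(&rq);		/* skipped: rq is offline */
	rq.online = 1;
	rt_set_overload(&rq);		/* now recorded in the root domain */
	printf("rto_count=%d mask=%#lx\n",
	       atomic_load(&rd.rto_count), rd.rto_mask);
	return 0;
}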

 	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 		rt_rq->highest_prio = rt_se_prio(rt_se);
-		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se));
+
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu,
+				   rt_se_prio(rt_se));
 	}

 #endif
 #ifdef CONFIG_SMP
 	if (rt_rq->highest_prio != highest_prio) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
-		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
+
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu,
+				   rt_rq->highest_prio);
 	}
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 }
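Both cpupri_set() call sites get the same treatment. cpupri tracks, per root domain, the highest RT priority runnable on each CPU so the push/pull logic can quickly locate a suitable target CPU; guarding the update with rq->online keeps that map from advertising a CPU that has left, or is in the middle of leaving, the domain.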

 /* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
 }

 /* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
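The rename is a semantic change, not just new naming: the hooks are now driven by the runqueue's online state rather than by root-domain attach/detach alone, so in this series the overload and cpupri state is torn down when a CPU goes offline and rebuilt when it comes back online.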

 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed	= set_cpus_allowed_rt,
-	.join_domain		= join_domain_rt,
-	.leave_domain		= leave_domain_rt,
+	.rq_online		= rq_online_rt,
+	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
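For context, the caller side lives in sched.c and is not part of this fragment: when a runqueue is marked online or offline under rq->lock, the core walks the scheduler classes and invokes the new hooks. A sketch of that counterpart as this series shapes it (details abbreviated, reconstructed for illustration):

static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpu_set(rq->cpu, rq->rd->online);
		rq->online = 1;

		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}

static void set_rq_offline(struct rq *rq)
{
	if (rq->online) {
		const struct sched_class *class;

		/* classes tear down while the rq still reads as online */
		for_each_class(class) {
			if (class->rq_offline)
				class->rq_offline(rq);
		}

		cpu_clear(rq->cpu, rq->rd->online);
		rq->online = 0;
	}
}

Note the ordering: rq->online is set before the rq_online hooks run and cleared only after the rq_offline hooks return, which is exactly what lets the !rq->online early returns in rt_set_overload()/rt_clear_overload() above do the right thing from both call paths.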