/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#ifdef CONFIG_SMP

static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
	return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
	rq->rt.overloaded = 1;
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
	rq->rt.overloaded = 0;
}

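/*
 * A runqueue is considered "RT overloaded" when it has more than one
 * runnable RT task and at least one of them is allowed to run on another
 * CPU. Overloaded runqueues are the candidates for pushing tasks away
 * and for being pulled from.
 */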
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
		rt_set_overload(rq);
	else
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			/* recalculate */
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->rt.highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);

	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if the woken
	 * RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache-hot CPU will force the running RT task to
	 * a cold CPU. We would waste all the cache of the lower
	 * RT task in hopes of saving some for an RT task
	 * that is just being woken and probably has a
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

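/*
 * Pick the highest-priority runnable RT task: find the first set bit in
 * the priority bitmap (lower numbers mean higher priority) and take the
 * task at the head of that priority's queue.
 */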
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

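/*
 * SMP balancing for RT tasks works in two directions: "pushing" extra RT
 * tasks from an overloaded runqueue to CPUs that are running
 * lower-priority work, and "pulling" higher-priority RT tasks onto a CPU
 * that is about to schedule something of lower priority.
 */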
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
						      int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);

	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next, struct task_struct, run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

 retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

	goto retry;

 out:
	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);

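/*
 * Build a mask of CPUs that are suitable targets for @task: CPUs whose
 * highest-priority RT task is of lower priority than @task, or that run
 * no RT task at all. A CPU with no RT task is always best, so finding
 * one ends the scan. Returns the number of CPUs left in the mask.
 */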
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int cpu;
	cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
	int lowest_prio = -1;
	int count = 0;

	cpus_clear(*lowest_mask);
	cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *valid_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/* a CPU with no RT task is always the best target */
			cpus_clear(*lowest_mask);
			cpu_set(rq->cpu, *lowest_mask);
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				cpus_clear(*lowest_mask);
				count = 0;
			}
			cpu_set(rq->cpu, *lowest_mask);
			count++;
		}
	}

	return count;
}

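/*
 * Given a mask of candidate CPUs, prefer this_cpu (preempting locally is
 * cheapest), otherwise take the first CPU in the mask, or return -1 if
 * the mask is empty.
 */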
static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int count = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * CPU is in the mask.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the
	 * compatible locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In the
			 * meantime, the task could have migrated already
			 * or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&rq->lock);

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
 out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

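/*
 * Pull higher-priority RT tasks from overloaded runqueues onto this_rq.
 * Returns nonzero if the set of runnable tasks on this_rq may have
 * changed, so the caller must re-pick its next task.
 */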
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	cpumask_t *rto_cpumask;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay on trying to keep
	 * dirtying caches down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);

	rto_cpumask = rt_overload();

	for_each_cpu_mask(cpu, *rto_cpumask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non-overloaded runqueue.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;
				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;
			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

 try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue, or if
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto out;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 out:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

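/*
 * If this runqueue is about to drop below prev's priority (prev is an RT
 * task and nothing queued here is as high), try to pull RT tasks from
 * other runqueues before something of lower priority gets to run.
 */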
static void schedule_balance_rt(struct rq *rq,
				struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

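/*
 * If the RT task we just woke is not already running and is not of
 * higher priority than everything queued here, and this runqueue is
 * overloaded, push RT tasks off to other CPUs right away.
 */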
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->nr_cpus_allowed <= 1) && (weight > 1))
			rq->rt.rt_nr_migratory++;
		else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = weight;
}

#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)	do { } while (0)
# define schedule_balance_rt(rq, prev)	do { } while (0)
# define wakeup_balance_rt(rq, p)	do { } while (0)
#endif /* CONFIG_SMP */

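/*
 * Scheduler tick for RT tasks: for SCHED_RR, decrement the timeslice
 * and, when it expires, refill it and move the task to the end of its
 * priority queue (if it is not alone there); SCHED_FIFO tasks have no
 * timeslice and are left alone.
 */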
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
#endif /* CONFIG_SMP */

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,
};