kernel/sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         if (!rq->online)
16                 return;
17
18         cpu_set(rq->cpu, rq->rd->rto_mask);
19         /*
20          * Make sure the mask is visible before we set
21          * the overload count. That is checked to determine
22          * if we should look at the mask. It would be a shame
23          * if we looked at the mask, but the mask was not
24          * updated yet.
25          */
26         wmb();
27         atomic_inc(&rq->rd->rto_count);
28 }
29
30 static inline void rt_clear_overload(struct rq *rq)
31 {
32         if (!rq->online)
33                 return;
34
35         /* the order here really doesn't matter */
36         atomic_dec(&rq->rd->rto_count);
37         cpu_clear(rq->cpu, rq->rd->rto_mask);
38 }
39
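/*
 * Keep the rq's "RT overload" state in sync: an rq counts as overloaded
 * when it has more than one runnable RT task and at least one of them
 * may migrate to another CPU. The push/pull logic uses this to decide
 * which runqueues are worth looking at.
 */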
40 static void update_rt_migration(struct rq *rq)
41 {
42         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43                 if (!rq->rt.overloaded) {
44                         rt_set_overload(rq);
45                         rq->rt.overloaded = 1;
46                 }
47         } else if (rq->rt.overloaded) {
48                 rt_clear_overload(rq);
49                 rq->rt.overloaded = 0;
50         }
51 }
52 #endif /* CONFIG_SMP */
53
54 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
55 {
56         return container_of(rt_se, struct task_struct, rt);
57 }
58
59 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60 {
61         return !list_empty(&rt_se->run_list);
62 }
63
64 #ifdef CONFIG_RT_GROUP_SCHED
65
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
67 {
68         if (!rt_rq->tg)
69                 return RUNTIME_INF;
70
71         return rt_rq->rt_runtime;
72 }
73
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75 {
76         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
77 }
78
79 #define for_each_leaf_rt_rq(rt_rq, rq) \
80         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83 {
84         return rt_rq->rq;
85 }
86
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88 {
89         return rt_se->rt_rq;
90 }
91
92 #define for_each_sched_rt_entity(rt_se) \
93         for (; rt_se; rt_se = rt_se->parent)
94
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96 {
97         return rt_se->my_q;
98 }
99
100 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104 {
105         struct sched_rt_entity *rt_se = rt_rq->rt_se;
106
107         if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
108                 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
109
110                 enqueue_rt_entity(rt_se);
111                 if (rt_rq->highest_prio < curr->prio)
112                         resched_task(curr);
113         }
114 }
115
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
117 {
118         struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120         if (rt_se && on_rt_rq(rt_se))
121                 dequeue_rt_entity(rt_se);
122 }
123
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125 {
126         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127 }
128
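/*
 * An entity is "boosted" when priority inheritance has raised its
 * priority above its normal priority (p->prio != p->normal_prio).
 * Boosted entities keep running even when their group is throttled
 * (see rt_rq_throttled() above).
 */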
129 static int rt_se_boosted(struct sched_rt_entity *rt_se)
130 {
131         struct rt_rq *rt_rq = group_rt_rq(rt_se);
132         struct task_struct *p;
133
134         if (rt_rq)
135                 return !!rt_rq->rt_nr_boosted;
136
137         p = rt_task_of(rt_se);
138         return p->prio != p->normal_prio;
139 }
140
141 #ifdef CONFIG_SMP
142 static inline cpumask_t sched_rt_period_mask(void)
143 {
144         return cpu_rq(smp_processor_id())->rd->span;
145 }
146 #else
147 static inline cpumask_t sched_rt_period_mask(void)
148 {
149         return cpu_online_map;
150 }
151 #endif
152
153 static inline
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
155 {
156         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157 }
158
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160 {
161         return &rt_rq->tg->rt_bandwidth;
162 }
163
164 #else /* !CONFIG_RT_GROUP_SCHED */
165
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167 {
168         return rt_rq->rt_runtime;
169 }
170
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172 {
173         return ktime_to_ns(def_rt_bandwidth.rt_period);
174 }
175
176 #define for_each_leaf_rt_rq(rt_rq, rq) \
177         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 {
181         return container_of(rt_rq, struct rq, rt);
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186         struct task_struct *p = rt_task_of(rt_se);
187         struct rq *rq = task_rq(p);
188
189         return &rq->rt;
190 }
191
192 #define for_each_sched_rt_entity(rt_se) \
193         for (; rt_se; rt_se = NULL)
194
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196 {
197         return NULL;
198 }
199
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202 }
203
204 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
205 {
206 }
207
208 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
209 {
210         return rt_rq->rt_throttled;
211 }
212
213 static inline cpumask_t sched_rt_period_mask(void)
214 {
215         return cpu_online_map;
216 }
217
218 static inline
219 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
220 {
221         return &cpu_rq(cpu)->rt;
222 }
223
224 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
225 {
226         return &def_rt_bandwidth;
227 }
228
229 #endif /* CONFIG_RT_GROUP_SCHED */
230
231 #ifdef CONFIG_SMP
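/*
 * Try to borrow unused RT runtime for this rt_rq from the other CPUs in
 * the root domain: each donor gives up a share of its spare
 * (rt_runtime - rt_time) budget, never pushing the borrower past a full
 * period. Returns 1 if any runtime was transferred.
 */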
232 static int do_balance_runtime(struct rt_rq *rt_rq)
233 {
234         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
235         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
236         int i, weight, more = 0;
237         u64 rt_period;
238
239         weight = cpus_weight(rd->span);
240
241         spin_lock(&rt_b->rt_runtime_lock);
242         rt_period = ktime_to_ns(rt_b->rt_period);
243         for_each_cpu_mask(i, rd->span) {
244                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
245                 s64 diff;
246
247                 if (iter == rt_rq)
248                         continue;
249
250                 spin_lock(&iter->rt_runtime_lock);
251                 if (iter->rt_runtime == RUNTIME_INF)
252                         goto next;
253
254                 diff = iter->rt_runtime - iter->rt_time;
255                 if (diff > 0) {
256                         do_div(diff, weight);
257                         if (rt_rq->rt_runtime + diff > rt_period)
258                                 diff = rt_period - rt_rq->rt_runtime;
259                         iter->rt_runtime -= diff;
260                         rt_rq->rt_runtime += diff;
261                         more = 1;
262                         if (rt_rq->rt_runtime == rt_period) {
263                                 spin_unlock(&iter->rt_runtime_lock);
264                                 break;
265                         }
266                 }
267 next:
268                 spin_unlock(&iter->rt_runtime_lock);
269         }
270         spin_unlock(&rt_b->rt_runtime_lock);
271
272         return more;
273 }
274
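/*
 * Called when the rq goes offline: settle this rt_rq's runtime balance
 * with the other rt_rqs in the root domain (reclaiming what it lent out
 * or returning what it borrowed) and then mark its runtime RUNTIME_INF
 * so nothing gets throttled while the CPU is going down.
 */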
275 static void __disable_runtime(struct rq *rq)
276 {
277         struct root_domain *rd = rq->rd;
278         struct rt_rq *rt_rq;
279
280         if (unlikely(!scheduler_running))
281                 return;
282
283         for_each_leaf_rt_rq(rt_rq, rq) {
284                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
285                 s64 want;
286                 int i;
287
288                 spin_lock(&rt_b->rt_runtime_lock);
289                 spin_lock(&rt_rq->rt_runtime_lock);
290                 if (rt_rq->rt_runtime == RUNTIME_INF ||
291                                 rt_rq->rt_runtime == rt_b->rt_runtime)
292                         goto balanced;
293                 spin_unlock(&rt_rq->rt_runtime_lock);
294
295                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
296
297                 for_each_cpu_mask(i, rd->span) {
298                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
299                         s64 diff;
300
301                         if (iter == rt_rq)
302                                 continue;
303
304                         spin_lock(&iter->rt_runtime_lock);
305                         if (want > 0) {
306                                 diff = min_t(s64, iter->rt_runtime, want);
307                                 iter->rt_runtime -= diff;
308                                 want -= diff;
309                         } else {
310                                 iter->rt_runtime -= want;
311                                 want -= want;
312                         }
313                         spin_unlock(&iter->rt_runtime_lock);
314
315                         if (!want)
316                                 break;
317                 }
318
319                 spin_lock(&rt_rq->rt_runtime_lock);
320                 BUG_ON(want);
321 balanced:
322                 rt_rq->rt_runtime = RUNTIME_INF;
323                 spin_unlock(&rt_rq->rt_runtime_lock);
324                 spin_unlock(&rt_b->rt_runtime_lock);
325         }
326 }
327
328 static void disable_runtime(struct rq *rq)
329 {
330         unsigned long flags;
331
332         spin_lock_irqsave(&rq->lock, flags);
333         __disable_runtime(rq);
334         spin_unlock_irqrestore(&rq->lock, flags);
335 }
336
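/*
 * Called when the rq comes (back) online: reset every leaf rt_rq to its
 * default bandwidth, undoing the RUNTIME_INF state and any borrowing
 * left behind by __disable_runtime().
 */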
337 static void __enable_runtime(struct rq *rq)
338 {
339         struct rt_rq *rt_rq;
340
341         if (unlikely(!scheduler_running))
342                 return;
343
344         for_each_leaf_rt_rq(rt_rq, rq) {
345                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
346
347                 spin_lock(&rt_b->rt_runtime_lock);
348                 spin_lock(&rt_rq->rt_runtime_lock);
349                 rt_rq->rt_runtime = rt_b->rt_runtime;
350                 rt_rq->rt_time = 0;
351                 spin_unlock(&rt_rq->rt_runtime_lock);
352                 spin_unlock(&rt_b->rt_runtime_lock);
353         }
354 }
355
356 static void enable_runtime(struct rq *rq)
357 {
358         unsigned long flags;
359
360         spin_lock_irqsave(&rq->lock, flags);
361         __enable_runtime(rq);
362         spin_unlock_irqrestore(&rq->lock, flags);
363 }
364
365 static int balance_runtime(struct rt_rq *rt_rq)
366 {
367         int more = 0;
368
369         if (rt_rq->rt_time > rt_rq->rt_runtime) {
370                 spin_unlock(&rt_rq->rt_runtime_lock);
371                 more = do_balance_runtime(rt_rq);
372                 spin_lock(&rt_rq->rt_runtime_lock);
373         }
374
375         return more;
376 }
377 #else /* !CONFIG_SMP */
378 static inline int balance_runtime(struct rt_rq *rt_rq)
379 {
380         return 0;
381 }
382 #endif /* CONFIG_SMP */
383
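/*
 * Runs from the rt_bandwidth period timer: for every rt_rq in the
 * period mask, refresh its budget by knocking the elapsed periods'
 * worth of runtime off rt_time, and if that brings a throttled rt_rq
 * back below its runtime, unthrottle and re-enqueue it. Returns 1 if
 * all rt_rqs were idle, so the timer can be stopped.
 */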
384 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
385 {
386         int i, idle = 1;
387         cpumask_t span;
388
389         if (rt_b->rt_runtime == RUNTIME_INF)
390                 return 1;
391
392         span = sched_rt_period_mask();
393         for_each_cpu_mask(i, span) {
394                 int enqueue = 0;
395                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
396                 struct rq *rq = rq_of_rt_rq(rt_rq);
397
398                 spin_lock(&rq->lock);
399                 if (rt_rq->rt_time) {
400                         u64 runtime;
401
402                         spin_lock(&rt_rq->rt_runtime_lock);
403                         if (rt_rq->rt_throttled)
404                                 balance_runtime(rt_rq);
405                         runtime = rt_rq->rt_runtime;
406                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
407                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
408                                 rt_rq->rt_throttled = 0;
409                                 enqueue = 1;
410                         }
411                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
412                                 idle = 0;
413                         spin_unlock(&rt_rq->rt_runtime_lock);
414                 } else if (rt_rq->rt_nr_running)
415                         idle = 0;
416
417                 if (enqueue)
418                         sched_rt_rq_enqueue(rt_rq);
419                 spin_unlock(&rq->lock);
420         }
421
422         return idle;
423 }
424
425 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
426 {
427 #ifdef CONFIG_RT_GROUP_SCHED
428         struct rt_rq *rt_rq = group_rt_rq(rt_se);
429
430         if (rt_rq)
431                 return rt_rq->highest_prio;
432 #endif
433
434         return rt_task_of(rt_se)->prio;
435 }
436
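/*
 * Check whether this rt_rq has exhausted its runtime for the current
 * period (after trying to borrow more from other CPUs). If so, mark it
 * throttled and dequeue it; its tasks will not run again until the
 * period timer replenishes the budget.
 */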
437 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
438 {
439         u64 runtime = sched_rt_runtime(rt_rq);
440
441         if (runtime == RUNTIME_INF)
442                 return 0;
443
444         if (rt_rq->rt_throttled)
445                 return rt_rq_throttled(rt_rq);
446
447         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
448                 return 0;
449
450         balance_runtime(rt_rq);
451         runtime = sched_rt_runtime(rt_rq);
452         if (runtime == RUNTIME_INF)
453                 return 0;
454
455         if (rt_rq->rt_time > runtime) {
456                 rt_rq->rt_throttled = 1;
457                 if (rt_rq_throttled(rt_rq)) {
458                         sched_rt_rq_dequeue(rt_rq);
459                         return 1;
460                 }
461         }
462
463         return 0;
464 }
465
466 /*
467  * Update the current task's runtime statistics. Skip current tasks that
468  * are not in our scheduling class.
469  */
470 static void update_curr_rt(struct rq *rq)
471 {
472         struct task_struct *curr = rq->curr;
473         struct sched_rt_entity *rt_se = &curr->rt;
474         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
475         u64 delta_exec;
476
477         if (!task_has_rt_policy(curr))
478                 return;
479
480         delta_exec = rq->clock - curr->se.exec_start;
481         if (unlikely((s64)delta_exec < 0))
482                 delta_exec = 0;
483
484         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
485
486         curr->se.sum_exec_runtime += delta_exec;
487         curr->se.exec_start = rq->clock;
488         cpuacct_charge(curr, delta_exec);
489
490         for_each_sched_rt_entity(rt_se) {
491                 rt_rq = rt_rq_of_se(rt_se);
492
493                 spin_lock(&rt_rq->rt_runtime_lock);
494                 rt_rq->rt_time += delta_exec;
495                 if (sched_rt_runtime_exceeded(rt_rq))
496                         resched_task(curr);
497                 spin_unlock(&rt_rq->rt_runtime_lock);
498         }
499 }
500
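/*
 * Enqueue-side accounting for an rt entity: bump rt_nr_running, track
 * the highest queued priority (and publish it to cpupri), count
 * migratable tasks for the overload logic, count boosted entities and
 * make sure the bandwidth period timer is running.
 */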
501 static inline
502 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
503 {
504         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
505         rt_rq->rt_nr_running++;
506 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
507         if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
508                 struct rq *rq = rq_of_rt_rq(rt_rq);
509
510                 rt_rq->highest_prio = rt_se_prio(rt_se);
511 #ifdef CONFIG_SMP
512                 if (rq->online)
513                         cpupri_set(&rq->rd->cpupri, rq->cpu,
514                                    rt_se_prio(rt_se));
515 #endif
516         }
517 #endif
518 #ifdef CONFIG_SMP
519         if (rt_se->nr_cpus_allowed > 1) {
520                 struct rq *rq = rq_of_rt_rq(rt_rq);
521
522                 rq->rt.rt_nr_migratory++;
523         }
524
525         update_rt_migration(rq_of_rt_rq(rt_rq));
526 #endif
527 #ifdef CONFIG_RT_GROUP_SCHED
528         if (rt_se_boosted(rt_se))
529                 rt_rq->rt_nr_boosted++;
530
531         if (rt_rq->tg)
532                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
533 #else
534         start_rt_bandwidth(&def_rt_bandwidth);
535 #endif
536 }
537
538 static inline
539 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
540 {
541 #ifdef CONFIG_SMP
542         int highest_prio = rt_rq->highest_prio;
543 #endif
544
545         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
546         WARN_ON(!rt_rq->rt_nr_running);
547         rt_rq->rt_nr_running--;
548 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
549         if (rt_rq->rt_nr_running) {
550                 struct rt_prio_array *array;
551
552                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
553                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
554                         /* recalculate */
555                         array = &rt_rq->active;
556                         rt_rq->highest_prio =
557                                 sched_find_first_bit(array->bitmap);
558                 } /* otherwise leave rt_rq->highest_prio alone */
559         } else
560                 rt_rq->highest_prio = MAX_RT_PRIO;
561 #endif
562 #ifdef CONFIG_SMP
563         if (rt_se->nr_cpus_allowed > 1) {
564                 struct rq *rq = rq_of_rt_rq(rt_rq);
565                 rq->rt.rt_nr_migratory--;
566         }
567
568         if (rt_rq->highest_prio != highest_prio) {
569                 struct rq *rq = rq_of_rt_rq(rt_rq);
570
571                 if (rq->online)
572                         cpupri_set(&rq->rd->cpupri, rq->cpu,
573                                    rt_rq->highest_prio);
574         }
575
576         update_rt_migration(rq_of_rt_rq(rt_rq));
577 #endif /* CONFIG_SMP */
578 #ifdef CONFIG_RT_GROUP_SCHED
579         if (rt_se_boosted(rt_se))
580                 rt_rq->rt_nr_boosted--;
581
582         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
583 #endif
584 }
585
586 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
587 {
588         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
589         struct rt_prio_array *array = &rt_rq->active;
590         struct rt_rq *group_rq = group_rt_rq(rt_se);
591         struct list_head *queue = array->queue + rt_se_prio(rt_se);
592
593         /*
594          * Don't enqueue the group if it's throttled, or when empty.
595          * The latter is a consequence of the former when a child group
596          * gets throttled and the current group doesn't have any other
597          * active members.
598          */
599         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
600                 return;
601
602         list_add_tail(&rt_se->run_list, queue);
603         __set_bit(rt_se_prio(rt_se), array->bitmap);
604
605         inc_rt_tasks(rt_se, rt_rq);
606 }
607
608 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
609 {
610         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
611         struct rt_prio_array *array = &rt_rq->active;
612
613         list_del_init(&rt_se->run_list);
614         if (list_empty(array->queue + rt_se_prio(rt_se)))
615                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
616
617         dec_rt_tasks(rt_se, rt_rq);
618 }
619
620 /*
621  * Because the prio of an upper entry depends on the lower
622  * entries, we must remove entries top-down.
623  */
624 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
625 {
626         struct sched_rt_entity *back = NULL;
627
628         for_each_sched_rt_entity(rt_se) {
629                 rt_se->back = back;
630                 back = rt_se;
631         }
632
633         for (rt_se = back; rt_se; rt_se = rt_se->back) {
634                 if (on_rt_rq(rt_se))
635                         __dequeue_rt_entity(rt_se);
636         }
637 }
638
639 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
640 {
641         dequeue_rt_stack(rt_se);
642         for_each_sched_rt_entity(rt_se)
643                 __enqueue_rt_entity(rt_se);
644 }
645
646 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
647 {
648         dequeue_rt_stack(rt_se);
649
650         for_each_sched_rt_entity(rt_se) {
651                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
652
653                 if (rt_rq && rt_rq->rt_nr_running)
654                         __enqueue_rt_entity(rt_se);
655         }
656 }
657
658 /*
659  * Adding/removing a task to/from a priority array:
660  */
661 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
662 {
663         struct sched_rt_entity *rt_se = &p->rt;
664
665         if (wakeup)
666                 rt_se->timeout = 0;
667
668         enqueue_rt_entity(rt_se);
669
670         inc_cpu_load(rq, p->se.load.weight);
671 }
672
673 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
674 {
675         struct sched_rt_entity *rt_se = &p->rt;
676
677         update_curr_rt(rq);
678         dequeue_rt_entity(rt_se);
679
680         dec_cpu_load(rq, p->se.load.weight);
681 }
682
683 /*
684  * Put the task at the end of the run list without the overhead of a
685  * dequeue followed by an enqueue.
686  */
687 static void
688 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
689 {
690         if (on_rt_rq(rt_se)) {
691                 struct rt_prio_array *array = &rt_rq->active;
692                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
693
694                 if (head)
695                         list_move(&rt_se->run_list, queue);
696                 else
697                         list_move_tail(&rt_se->run_list, queue);
698         }
699 }
700
701 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
702 {
703         struct sched_rt_entity *rt_se = &p->rt;
704         struct rt_rq *rt_rq;
705
706         for_each_sched_rt_entity(rt_se) {
707                 rt_rq = rt_rq_of_se(rt_se);
708                 requeue_rt_entity(rt_rq, rt_se, head);
709         }
710 }
711
712 static void yield_task_rt(struct rq *rq)
713 {
714         requeue_task_rt(rq, rq->curr, 0);
715 }
716
717 #ifdef CONFIG_SMP
718 static int find_lowest_rq(struct task_struct *task);
719
720 static int select_task_rq_rt(struct task_struct *p, int sync)
721 {
722         struct rq *rq = task_rq(p);
723
724         /*
725          * If the current task is an RT task, then
726          * try to see if we can wake this RT task up on another
727          * runqueue. Otherwise simply start this RT task
728          * on its current runqueue.
729          *
730          * We want to avoid overloading runqueues, even if
731          * the RT task is of higher priority than the current RT task.
732          * RT tasks behave differently from other tasks. If
733          * one gets preempted, we try to push it off to another queue.
734          * So trying to keep a preempting RT task on the same
735          * cache-hot CPU will force the running RT task to
736          * a cold CPU. So we waste all the cache for the lower
737          * RT task in hopes of saving some of an RT task
738          * that is just being woken and probably will have
739          * a cold cache anyway.
740          */
741         if (unlikely(rt_task(rq->curr)) &&
742             (p->rt.nr_cpus_allowed > 1)) {
743                 int cpu = find_lowest_rq(p);
744
745                 return (cpu == -1) ? task_cpu(p) : cpu;
746         }
747
748         /*
749          * Otherwise, just let it ride on the affined RQ and the
750          * post-schedule router will push the preempted task away
751          */
752         return task_cpu(p);
753 }
754
755 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
756 {
757         cpumask_t mask;
758
759         if (rq->curr->rt.nr_cpus_allowed == 1)
760                 return;
761
762         if (p->rt.nr_cpus_allowed != 1
763             && cpupri_find(&rq->rd->cpupri, p, &mask))
764                 return;
765
766         if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
767                 return;
768
769         /*
770          * There appear to be other cpus that can accept
771          * current and none to run 'p', so let's reschedule
772          * to try to push current away:
773          */
774         requeue_task_rt(rq, p, 1);
775         resched_task(rq->curr);
776 }
777
778 #endif /* CONFIG_SMP */
779
780 /*
781  * Preempt the current task with a newly woken task if needed:
782  */
783 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
784 {
785         if (p->prio < rq->curr->prio) {
786                 resched_task(rq->curr);
787                 return;
788         }
789
790 #ifdef CONFIG_SMP
791         /*
792          * If:
793          *
794          * - the newly woken task is of equal priority to the current task
795          * - the newly woken task is non-migratable while current is migratable
796          * - current will be preempted on the next reschedule
797          *
798          * we should check to see if current can readily move to a different
799          * cpu.  If so, we will reschedule to allow the push logic to try
800          * to move current somewhere else, making room for our non-migratable
801          * task.
802          */
803         if (p->prio == rq->curr->prio && !need_resched())
804                 check_preempt_equal_prio(rq, p);
805 #endif
806 }
807
808 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
809                                                    struct rt_rq *rt_rq)
810 {
811         struct rt_prio_array *array = &rt_rq->active;
812         struct sched_rt_entity *next = NULL;
813         struct list_head *queue;
814         int idx;
815
816         idx = sched_find_first_bit(array->bitmap);
817         BUG_ON(idx >= MAX_RT_PRIO);
818
819         queue = array->queue + idx;
820         next = list_entry(queue->next, struct sched_rt_entity, run_list);
821
822         return next;
823 }
824
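/*
 * Pick the task to run next: starting at the root rt_rq, repeatedly
 * select the highest-priority queued entity, descending through group
 * entities until an actual task is reached.
 */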
825 static struct task_struct *pick_next_task_rt(struct rq *rq)
826 {
827         struct sched_rt_entity *rt_se;
828         struct task_struct *p;
829         struct rt_rq *rt_rq;
830
831         rt_rq = &rq->rt;
832
833         if (unlikely(!rt_rq->rt_nr_running))
834                 return NULL;
835
836         if (rt_rq_throttled(rt_rq))
837                 return NULL;
838
839         do {
840                 rt_se = pick_next_rt_entity(rq, rt_rq);
841                 BUG_ON(!rt_se);
842                 rt_rq = group_rt_rq(rt_se);
843         } while (rt_rq);
844
845         p = rt_task_of(rt_se);
846         p->se.exec_start = rq->clock;
847         return p;
848 }
849
850 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
851 {
852         update_curr_rt(rq);
853         p->se.exec_start = 0;
854 }
855
856 #ifdef CONFIG_SMP
857
858 /* Only try algorithms three times */
859 #define RT_MAX_TRIES 3
860
861 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
862 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
863
864 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
865 {
866         if (!task_running(rq, p) &&
867             (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
868             (p->rt.nr_cpus_allowed > 1))
869                 return 1;
870         return 0;
871 }
872
873 /* Return the second highest RT task, NULL otherwise */
874 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
875 {
876         struct task_struct *next = NULL;
877         struct sched_rt_entity *rt_se;
878         struct rt_prio_array *array;
879         struct rt_rq *rt_rq;
880         int idx;
881
882         for_each_leaf_rt_rq(rt_rq, rq) {
883                 array = &rt_rq->active;
884                 idx = sched_find_first_bit(array->bitmap);
885  next_idx:
886                 if (idx >= MAX_RT_PRIO)
887                         continue;
888                 if (next && next->prio < idx)
889                         continue;
890                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
891                         struct task_struct *p = rt_task_of(rt_se);
892                         if (pick_rt_task(rq, p, cpu)) {
893                                 next = p;
894                                 break;
895                         }
896                 }
897                 if (!next) {
898                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
899                         goto next_idx;
900                 }
901         }
902
903         return next;
904 }
905
906 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
907
908 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
909 {
910         int first;
911
912         /* "this_cpu" is cheaper to preempt than a remote processor */
913         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
914                 return this_cpu;
915
916         first = first_cpu(*mask);
917         if (first != NR_CPUS)
918                 return first;
919
920         return -1;
921 }
922
923 static int find_lowest_rq(struct task_struct *task)
924 {
925         struct sched_domain *sd;
926         cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
927         int this_cpu = smp_processor_id();
928         int cpu      = task_cpu(task);
929
930         if (task->rt.nr_cpus_allowed == 1)
931                 return -1; /* No other targets possible */
932
933         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
934                 return -1; /* No targets found */
935
936         /*
937          * At this point we have built a mask of cpus representing the
938          * lowest priority tasks in the system.  Now we want to elect
939          * the best one based on our affinity and topology.
940          *
941          * We prioritize the last cpu that the task executed on since
942          * it is most likely cache-hot in that location.
943          */
944         if (cpu_isset(cpu, *lowest_mask))
945                 return cpu;
946
947         /*
948          * Otherwise, we consult the sched_domains span maps to figure
949          * out which cpu is logically closest to our hot cache data.
950          */
951         if (this_cpu == cpu)
952                 this_cpu = -1; /* Skip this_cpu opt if the same */
953
954         for_each_domain(cpu, sd) {
955                 if (sd->flags & SD_WAKE_AFFINE) {
956                         cpumask_t domain_mask;
957                         int       best_cpu;
958
959                         cpus_and(domain_mask, sd->span, *lowest_mask);
960
961                         best_cpu = pick_optimal_cpu(this_cpu,
962                                                     &domain_mask);
963                         if (best_cpu != -1)
964                                 return best_cpu;
965                 }
966         }
967
968         /*
969          * And finally, if there were no matches within the domains
970          * just give the caller *something* to work with from the compatible
971          * locations.
972          */
973         return pick_optimal_cpu(this_cpu, lowest_mask);
974 }
975
976 /* Will lock the rq it finds */
977 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
978 {
979         struct rq *lowest_rq = NULL;
980         int tries;
981         int cpu;
982
983         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
984                 cpu = find_lowest_rq(task);
985
986                 if ((cpu == -1) || (cpu == rq->cpu))
987                         break;
988
989                 lowest_rq = cpu_rq(cpu);
990
991                 /* if the prio of this runqueue changed, try again */
992                 if (double_lock_balance(rq, lowest_rq)) {
993                         /*
994                          * We had to unlock the run queue. In
995                          * the meantime, the task could have
996                          * migrated already or had its affinity changed.
997                          * Also make sure that it wasn't scheduled on its rq.
998                          */
999                         if (unlikely(task_rq(task) != rq ||
1000                                      !cpu_isset(lowest_rq->cpu,
1001                                                 task->cpus_allowed) ||
1002                                      task_running(rq, task) ||
1003                                      !task->se.on_rq)) {
1004
1005                                 spin_unlock(&lowest_rq->lock);
1006                                 lowest_rq = NULL;
1007                                 break;
1008                         }
1009                 }
1010
1011                 /* If this rq is still suitable, use it. */
1012                 if (lowest_rq->rt.highest_prio > task->prio)
1013                         break;
1014
1015                 /* try again */
1016                 spin_unlock(&lowest_rq->lock);
1017                 lowest_rq = NULL;
1018         }
1019
1020         return lowest_rq;
1021 }
1022
1023 /*
1024  * If the current CPU has more than one RT task, see if the non
1025  * running task can migrate over to a CPU that is running a task
1026  * of lesser priority.
1027  */
1028 static int push_rt_task(struct rq *rq)
1029 {
1030         struct task_struct *next_task;
1031         struct rq *lowest_rq;
1032         int ret = 0;
1033         int paranoid = RT_MAX_TRIES;
1034
1035         if (!rq->rt.overloaded)
1036                 return 0;
1037
1038         next_task = pick_next_highest_task_rt(rq, -1);
1039         if (!next_task)
1040                 return 0;
1041
1042  retry:
1043         if (unlikely(next_task == rq->curr)) {
1044                 WARN_ON(1);
1045                 return 0;
1046         }
1047
1048         /*
1049          * It's possible that the next_task slipped in with a
1050          * higher priority than current. If that's the case,
1051          * just reschedule current.
1052          */
1053         if (unlikely(next_task->prio < rq->curr->prio)) {
1054                 resched_task(rq->curr);
1055                 return 0;
1056         }
1057
1058         /* We might release rq lock */
1059         get_task_struct(next_task);
1060
1061         /* find_lock_lowest_rq locks the rq if found */
1062         lowest_rq = find_lock_lowest_rq(next_task, rq);
1063         if (!lowest_rq) {
1064                 struct task_struct *task;
1065                 /*
1066                  * find_lock_lowest_rq releases rq->lock
1067                  * so it is possible that next_task has changed.
1068                  * If it has, then try again.
1069                  */
1070                 task = pick_next_highest_task_rt(rq, -1);
1071                 if (unlikely(task != next_task) && task && paranoid--) {
1072                         put_task_struct(next_task);
1073                         next_task = task;
1074                         goto retry;
1075                 }
1076                 goto out;
1077         }
1078
1079         deactivate_task(rq, next_task, 0);
1080         set_task_cpu(next_task, lowest_rq->cpu);
1081         activate_task(lowest_rq, next_task, 0);
1082
1083         resched_task(lowest_rq->curr);
1084
1085         spin_unlock(&lowest_rq->lock);
1086
1087         ret = 1;
1088 out:
1089         put_task_struct(next_task);
1090
1091         return ret;
1092 }
1093
1094 /*
1095  * TODO: Currently we just use the second highest prio task on
1096  *       the queue, and stop when it can't migrate (or there's
1097  *       no more RT tasks).  There may be a case where a lower
1098  *       priority RT task has a different affinity than the
1099  *       higher RT task. In this case the lower RT task could
1100  *       possibly be able to migrate whereas the higher priority
1101  *       RT task could not.  We currently ignore this issue.
1102  *       Enhancements are welcome!
1103  */
1104 static void push_rt_tasks(struct rq *rq)
1105 {
1106         /* push_rt_task will return true if it moved an RT task */
1107         while (push_rt_task(rq))
1108                 ;
1109 }
1110
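/*
 * Pull RT tasks towards this runqueue: scan the overloaded runqueues in
 * the root domain and migrate over any queued RT task that would
 * preempt what this rq is about to run next.
 */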
1111 static int pull_rt_task(struct rq *this_rq)
1112 {
1113         int this_cpu = this_rq->cpu, ret = 0, cpu;
1114         struct task_struct *p, *next;
1115         struct rq *src_rq;
1116
1117         if (likely(!rt_overloaded(this_rq)))
1118                 return 0;
1119
1120         next = pick_next_task_rt(this_rq);
1121
1122         for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
1123                 if (this_cpu == cpu)
1124                         continue;
1125
1126                 src_rq = cpu_rq(cpu);
1127                 /*
1128                  * We can potentially drop this_rq's lock in
1129                  * double_lock_balance, and another CPU could
1130                  * steal our next task - hence we must cause
1131                  * the caller to recalculate the next task
1132                  * in that case:
1133                  */
1134                 if (double_lock_balance(this_rq, src_rq)) {
1135                         struct task_struct *old_next = next;
1136
1137                         next = pick_next_task_rt(this_rq);
1138                         if (next != old_next)
1139                                 ret = 1;
1140                 }
1141
1142                 /*
1143                  * Are there still pullable RT tasks?
1144                  */
1145                 if (src_rq->rt.rt_nr_running <= 1)
1146                         goto skip;
1147
1148                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1149
1150                 /*
1151                  * Do we have an RT task that preempts
1152                  * the to-be-scheduled task?
1153                  */
1154                 if (p && (!next || (p->prio < next->prio))) {
1155                         WARN_ON(p == src_rq->curr);
1156                         WARN_ON(!p->se.on_rq);
1157
1158                         /*
1159                          * There's a chance that p is higher in priority
1160                          * than what's currently running on its cpu.
1161                          * This is just because p is waking up and hasn't
1162                          * had a chance to schedule. We only pull
1163                          * p if it is lower in priority than the
1164                          * current task on the run queue or
1165                          * this_rq next task is lower in prio than
1166                          * the current task on that rq.
1167                          */
1168                         if (p->prio < src_rq->curr->prio ||
1169                             (next && next->prio < src_rq->curr->prio))
1170                                 goto skip;
1171
1172                         ret = 1;
1173
1174                         deactivate_task(src_rq, p, 0);
1175                         set_task_cpu(p, this_cpu);
1176                         activate_task(this_rq, p, 0);
1177                         /*
1178                          * We continue with the search, just in
1179                          * case there's an even higher prio task
1180                          * in another runqueue. (low likelihood
1181                          * but possible)
1182                          *
1183                          * Update next so that we won't pick a task
1184                          * on another cpu with a priority lower (or equal)
1185                          * than the one we just picked.
1186                          */
1187                         next = p;
1188
1189                 }
1190  skip:
1191                 spin_unlock(&src_rq->lock);
1192         }
1193
1194         return ret;
1195 }
1196
1197 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1198 {
1199         /* Try to pull RT tasks here if we lower this rq's prio */
1200         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1201                 pull_rt_task(rq);
1202 }
1203
1204 static void post_schedule_rt(struct rq *rq)
1205 {
1206         /*
1207          * If we have more than one rt_task queued, then
1208          * see if we can push the other rt_tasks off to other CPUS.
1209          * Note we may release the rq lock, and since
1210          * the lock was owned by prev, we need to release it
1211          * first via finish_lock_switch and then reacquire it here.
1212          */
1213         if (unlikely(rq->rt.overloaded)) {
1214                 spin_lock_irq(&rq->lock);
1215                 push_rt_tasks(rq);
1216                 spin_unlock_irq(&rq->lock);
1217         }
1218 }
1219
1220 /*
1221  * If we are not running and we are not going to reschedule soon, we should
1222  * try to push tasks away now.
1223  */
1224 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1225 {
1226         if (!task_running(rq, p) &&
1227             !test_tsk_need_resched(rq->curr) &&
1228             rq->rt.overloaded)
1229                 push_rt_tasks(rq);
1230 }
1231
1232 static unsigned long
1233 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1234                 unsigned long max_load_move,
1235                 struct sched_domain *sd, enum cpu_idle_type idle,
1236                 int *all_pinned, int *this_best_prio)
1237 {
1238         /* don't touch RT tasks */
1239         return 0;
1240 }
1241
1242 static int
1243 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1244                  struct sched_domain *sd, enum cpu_idle_type idle)
1245 {
1246         /* don't touch RT tasks */
1247         return 0;
1248 }
1249
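/*
 * Update a task's CPU affinity and keep the rq's count of migratable RT
 * tasks (rt_nr_migratory) and its overload state consistent with the
 * new mask.
 */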
1250 static void set_cpus_allowed_rt(struct task_struct *p,
1251                                 const cpumask_t *new_mask)
1252 {
1253         int weight = cpus_weight(*new_mask);
1254
1255         BUG_ON(!rt_task(p));
1256
1257         /*
1258          * Update the migration status of the RQ if we have an RT task
1259          * which is running AND changing its weight value.
1260          */
1261         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1262                 struct rq *rq = task_rq(p);
1263
1264                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1265                         rq->rt.rt_nr_migratory++;
1266                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1267                         BUG_ON(!rq->rt.rt_nr_migratory);
1268                         rq->rt.rt_nr_migratory--;
1269                 }
1270
1271                 update_rt_migration(rq);
1272         }
1273
1274         p->cpus_allowed    = *new_mask;
1275         p->rt.nr_cpus_allowed = weight;
1276 }
1277
1278 /* Assumes rq->lock is held */
1279 static void rq_online_rt(struct rq *rq)
1280 {
1281         if (rq->rt.overloaded)
1282                 rt_set_overload(rq);
1283
1284         __enable_runtime(rq);
1285
1286         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1287 }
1288
1289 /* Assumes rq->lock is held */
1290 static void rq_offline_rt(struct rq *rq)
1291 {
1292         if (rq->rt.overloaded)
1293                 rt_clear_overload(rq);
1294
1295         __disable_runtime(rq);
1296
1297         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1298 }
1299
1300 /*
1301  * When switching from the rt queue, we bring ourselves to a position
1302  * where we might want to pull RT tasks from other runqueues.
1303  */
1304 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1305                            int running)
1306 {
1307         /*
1308          * If there are other RT tasks then we will reschedule
1309          * and the scheduling of the other RT tasks will handle
1310          * the balancing. But if we are the last RT task
1311          * we may need to handle the pulling of RT tasks
1312          * now.
1313          */
1314         if (!rq->rt.rt_nr_running)
1315                 pull_rt_task(rq);
1316 }
1317 #endif /* CONFIG_SMP */
1318
1319 /*
1320  * When switching a task to RT, we may overload the runqueue
1321  * with RT tasks. In this case we try to push them off to
1322  * other runqueues.
1323  */
1324 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1325                            int running)
1326 {
1327         int check_resched = 1;
1328
1329         /*
1330          * If we are already running, then there's nothing
1331          * that needs to be done. But if we are not running
1332          * we may need to preempt the currently running task.
1333          * If that current running task is also an RT task
1334          * then see if we can move to another run queue.
1335          */
1336         if (!running) {
1337 #ifdef CONFIG_SMP
1338                 if (rq->rt.overloaded && push_rt_task(rq) &&
1339                     /* Don't resched if we changed runqueues */
1340                     rq != task_rq(p))
1341                         check_resched = 0;
1342 #endif /* CONFIG_SMP */
1343                 if (check_resched && p->prio < rq->curr->prio)
1344                         resched_task(rq->curr);
1345         }
1346 }
1347
1348 /*
1349  * Priority of the task has changed. This may cause
1350  * us to initiate a push or pull.
1351  */
1352 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1353                             int oldprio, int running)
1354 {
1355         if (running) {
1356 #ifdef CONFIG_SMP
1357                 /*
1358                  * If our priority decreases while running, we
1359                  * may need to pull tasks to this runqueue.
1360                  */
1361                 if (oldprio < p->prio)
1362                         pull_rt_task(rq);
1363                 /*
1364                  * If there's a higher priority task waiting to run
1365                  * then reschedule. Note, the above pull_rt_task
1366                  * can release the rq lock and p could migrate.
1367                  * Only reschedule if p is still on the same runqueue.
1368                  */
1369                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1370                         resched_task(p);
1371 #else
1372                 /* For UP simply resched on drop of prio */
1373                 if (oldprio < p->prio)
1374                         resched_task(p);
1375 #endif /* CONFIG_SMP */
1376         } else {
1377                 /*
1378                  * This task is not running, but if its priority is
1379                  * higher than that of the currently running task
1380                  * then reschedule.
1381                  */
1382                 if (p->prio < rq->curr->prio)
1383                         resched_task(rq->curr);
1384         }
1385 }
1386
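/*
 * RLIMIT_RTTIME watchdog: count how many ticks this task has run since
 * it last slept and, once the soft limit is exceeded, set
 * it_sched_expires so the posix-cpu-timer path picks up the overrun.
 */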
1387 static void watchdog(struct rq *rq, struct task_struct *p)
1388 {
1389         unsigned long soft, hard;
1390
1391         if (!p->signal)
1392                 return;
1393
1394         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1395         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1396
1397         if (soft != RLIM_INFINITY) {
1398                 unsigned long next;
1399
1400                 p->rt.timeout++;
1401                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1402                 if (p->rt.timeout > next)
1403                         p->it_sched_expires = p->se.sum_exec_runtime;
1404         }
1405 }
1406
1407 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1408 {
1409         update_curr_rt(rq);
1410
1411         watchdog(rq, p);
1412
1413         /*
1414          * RR tasks need a special form of timeslice management.
1415          * FIFO tasks have no timeslices.
1416          */
1417         if (p->policy != SCHED_RR)
1418                 return;
1419
1420         if (--p->rt.time_slice)
1421                 return;
1422
1423         p->rt.time_slice = DEF_TIMESLICE;
1424
1425         /*
1426          * Requeue to the end of the queue if we are not the only element
1427          * on the queue:
1428          */
1429         if (p->rt.run_list.prev != p->rt.run_list.next) {
1430                 requeue_task_rt(rq, p, 0);
1431                 set_tsk_need_resched(p);
1432         }
1433 }
1434
1435 static void set_curr_task_rt(struct rq *rq)
1436 {
1437         struct task_struct *p = rq->curr;
1438
1439         p->se.exec_start = rq->clock;
1440 }
1441
1442 static const struct sched_class rt_sched_class = {
1443         .next                   = &fair_sched_class,
1444         .enqueue_task           = enqueue_task_rt,
1445         .dequeue_task           = dequeue_task_rt,
1446         .yield_task             = yield_task_rt,
1447 #ifdef CONFIG_SMP
1448         .select_task_rq         = select_task_rq_rt,
1449 #endif /* CONFIG_SMP */
1450
1451         .check_preempt_curr     = check_preempt_curr_rt,
1452
1453         .pick_next_task         = pick_next_task_rt,
1454         .put_prev_task          = put_prev_task_rt,
1455
1456 #ifdef CONFIG_SMP
1457         .load_balance           = load_balance_rt,
1458         .move_one_task          = move_one_task_rt,
1459         .set_cpus_allowed       = set_cpus_allowed_rt,
1460         .rq_online              = rq_online_rt,
1461         .rq_offline             = rq_offline_rt,
1462         .pre_schedule           = pre_schedule_rt,
1463         .post_schedule          = post_schedule_rt,
1464         .task_wake_up           = task_wake_up_rt,
1465         .switched_from          = switched_from_rt,
1466 #endif
1467
1468         .set_curr_task          = set_curr_task_rt,
1469         .task_tick              = task_tick_rt,
1470
1471         .prio_changed           = prio_changed_rt,
1472         .switched_to            = switched_to_rt,
1473 };
1474
1475 #ifdef CONFIG_SCHED_DEBUG
1476 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1477
1478 static void print_rt_stats(struct seq_file *m, int cpu)
1479 {
1480         struct rt_rq *rt_rq;
1481
1482         rcu_read_lock();
1483         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1484                 print_rt_rq(m, cpu, rt_rq);
1485         rcu_read_unlock();
1486 }
1487 #endif /* CONFIG_SCHED_DEBUG */