kernel/sched/rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14 struct rt_bandwidth def_rt_bandwidth;
15
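/*
 * Bandwidth period timer callback: forward the timer one period at a time,
 * replenishing runtime via do_sched_rt_period_timer() for each elapsed
 * period, and stop rearming once everything has gone idle.
 */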
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18         struct rt_bandwidth *rt_b =
19                 container_of(timer, struct rt_bandwidth, rt_period_timer);
20         ktime_t now;
21         int overrun;
22         int idle = 0;
23
24         for (;;) {
25                 now = hrtimer_cb_get_time(timer);
26                 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28                 if (!overrun)
29                         break;
30
31                 idle = do_sched_rt_period_timer(rt_b, overrun);
32         }
33
34         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
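/*
 * Set up an rt_bandwidth pool with the given period and runtime budget;
 * the replenishment timer is initialized but not yet armed.
 */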
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39         rt_b->rt_period = ns_to_ktime(period);
40         rt_b->rt_runtime = runtime;
41
42         raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44         hrtimer_init(&rt_b->rt_period_timer,
45                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46         rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
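/*
 * Arm the replenishment timer for this bandwidth pool, unless RT
 * throttling is disabled, runtime is infinite, or it is already running.
 */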
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52                 return;
53
54         if (hrtimer_active(&rt_b->rt_period_timer))
55                 return;
56
57         raw_spin_lock(&rt_b->rt_runtime_lock);
58         start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59         raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
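/*
 * Initialize a per-cpu RT runqueue: empty priority array, SMP push/pull
 * bookkeeping and bandwidth accounting state.
 */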
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64         struct rt_prio_array *array;
65         int i;
66
67         array = &rt_rq->active;
68         for (i = 0; i < MAX_RT_PRIO; i++) {
69                 INIT_LIST_HEAD(array->queue + i);
70                 __clear_bit(i, array->bitmap);
71         }
72         /* delimiter for bitsearch: */
73         __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76         rt_rq->highest_prio.curr = MAX_RT_PRIO;
77         rt_rq->highest_prio.next = MAX_RT_PRIO;
78         rt_rq->rt_nr_migratory = 0;
79         rt_rq->overloaded = 0;
80         plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82         /* We start in dequeued state, because no RT tasks are queued */
83         rt_rq->rt_queued = 0;
84
85         rt_rq->rt_time = 0;
86         rt_rq->rt_throttled = 0;
87         rt_rq->rt_runtime = 0;
88         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
89 }
90
91 #ifdef CONFIG_RT_GROUP_SCHED
92 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
93 {
94         hrtimer_cancel(&rt_b->rt_period_timer);
95 }
96
97 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
98
99 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
100 {
101 #ifdef CONFIG_SCHED_DEBUG
102         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
103 #endif
104         return container_of(rt_se, struct task_struct, rt);
105 }
106
107 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
108 {
109         return rt_rq->rq;
110 }
111
112 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
113 {
114         return rt_se->rt_rq;
115 }
116
117 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
118 {
119         struct rt_rq *rt_rq = rt_se->rt_rq;
120
121         return rt_rq->rq;
122 }
123
124 void free_rt_sched_group(struct task_group *tg)
125 {
126         int i;
127
128         if (tg->rt_se)
129                 destroy_rt_bandwidth(&tg->rt_bandwidth);
130
131         for_each_possible_cpu(i) {
132                 if (tg->rt_rq)
133                         kfree(tg->rt_rq[i]);
134                 if (tg->rt_se)
135                         kfree(tg->rt_se[i]);
136         }
137
138         kfree(tg->rt_rq);
139         kfree(tg->rt_se);
140 }
141
142 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
143                 struct sched_rt_entity *rt_se, int cpu,
144                 struct sched_rt_entity *parent)
145 {
146         struct rq *rq = cpu_rq(cpu);
147
148         rt_rq->highest_prio.curr = MAX_RT_PRIO;
149         rt_rq->rt_nr_boosted = 0;
150         rt_rq->rq = rq;
151         rt_rq->tg = tg;
152
153         tg->rt_rq[cpu] = rt_rq;
154         tg->rt_se[cpu] = rt_se;
155
156         if (!rt_se)
157                 return;
158
159         if (!parent)
160                 rt_se->rt_rq = &rq->rt;
161         else
162                 rt_se->rt_rq = parent->my_q;
163
164         rt_se->my_q = rt_rq;
165         rt_se->parent = parent;
166         INIT_LIST_HEAD(&rt_se->run_list);
167 }
168
169 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
170 {
171         struct rt_rq *rt_rq;
172         struct sched_rt_entity *rt_se;
173         int i;
174
175         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
176         if (!tg->rt_rq)
177                 goto err;
178         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
179         if (!tg->rt_se)
180                 goto err;
181
182         init_rt_bandwidth(&tg->rt_bandwidth,
183                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
184
185         for_each_possible_cpu(i) {
186                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
187                                      GFP_KERNEL, cpu_to_node(i));
188                 if (!rt_rq)
189                         goto err;
190
191                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
192                                      GFP_KERNEL, cpu_to_node(i));
193                 if (!rt_se)
194                         goto err_free_rq;
195
196                 init_rt_rq(rt_rq, cpu_rq(i));
197                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
198                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
199         }
200
201         return 1;
202
203 err_free_rq:
204         kfree(rt_rq);
205 err:
206         return 0;
207 }
208
209 #else /* CONFIG_RT_GROUP_SCHED */
210
211 #define rt_entity_is_task(rt_se) (1)
212
213 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
214 {
215         return container_of(rt_se, struct task_struct, rt);
216 }
217
218 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
219 {
220         return container_of(rt_rq, struct rq, rt);
221 }
222
223 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
224 {
225         struct task_struct *p = rt_task_of(rt_se);
226
227         return task_rq(p);
228 }
229
230 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
231 {
232         struct rq *rq = rq_of_rt_se(rt_se);
233
234         return &rq->rt;
235 }
236
237 void free_rt_sched_group(struct task_group *tg) { }
238
239 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
240 {
241         return 1;
242 }
243 #endif /* CONFIG_RT_GROUP_SCHED */
244
245 #ifdef CONFIG_SMP
246
247 static int pull_rt_task(struct rq *this_rq);
248
249 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
250 {
251         /* Try to pull RT tasks here if we lower this rq's prio */
252         return rq->rt.highest_prio.curr > prev->prio;
253 }
254
255 static inline int rt_overloaded(struct rq *rq)
256 {
257         return atomic_read(&rq->rd->rto_count);
258 }
259
260 static inline void rt_set_overload(struct rq *rq)
261 {
262         if (!rq->online)
263                 return;
264
265         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
266         /*
267          * Make sure the mask is visible before we set
268          * the overload count. That is checked to determine
269          * if we should look at the mask. It would be a shame
270          * if we looked at the mask, but the mask was not
271          * updated yet.
272          *
273          * Matched by the barrier in pull_rt_task().
274          */
275         smp_wmb();
276         atomic_inc(&rq->rd->rto_count);
277 }
278
279 static inline void rt_clear_overload(struct rq *rq)
280 {
281         if (!rq->online)
282                 return;
283
284         /* the order here really doesn't matter */
285         atomic_dec(&rq->rd->rto_count);
286         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
287 }
288
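/*
 * Re-evaluate this runqueue's RT overload state: it is overloaded when
 * more than one RT task is queued and at least one of them may migrate.
 */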
289 static void update_rt_migration(struct rt_rq *rt_rq)
290 {
291         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
292                 if (!rt_rq->overloaded) {
293                         rt_set_overload(rq_of_rt_rq(rt_rq));
294                         rt_rq->overloaded = 1;
295                 }
296         } else if (rt_rq->overloaded) {
297                 rt_clear_overload(rq_of_rt_rq(rt_rq));
298                 rt_rq->overloaded = 0;
299         }
300 }
301
302 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
303 {
304         struct task_struct *p;
305
306         if (!rt_entity_is_task(rt_se))
307                 return;
308
309         p = rt_task_of(rt_se);
310         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
311
312         rt_rq->rt_nr_total++;
313         if (p->nr_cpus_allowed > 1)
314                 rt_rq->rt_nr_migratory++;
315
316         update_rt_migration(rt_rq);
317 }
318
319 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
320 {
321         struct task_struct *p;
322
323         if (!rt_entity_is_task(rt_se))
324                 return;
325
326         p = rt_task_of(rt_se);
327         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
328
329         rt_rq->rt_nr_total--;
330         if (p->nr_cpus_allowed > 1)
331                 rt_rq->rt_nr_migratory--;
332
333         update_rt_migration(rt_rq);
334 }
335
336 static inline int has_pushable_tasks(struct rq *rq)
337 {
338         return !plist_head_empty(&rq->rt.pushable_tasks);
339 }
340
341 static inline void set_post_schedule(struct rq *rq)
342 {
343         /*
344          * We detect this state here so that we can avoid taking the RQ
345          * lock again later if there is no need to push
346          */
347         rq->post_schedule = has_pushable_tasks(rq);
348 }
349
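/*
 * Maintain the priority-sorted plist of tasks that are candidates for
 * being pushed to another CPU, tracking the highest priority among them.
 */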
350 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
351 {
352         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
353         plist_node_init(&p->pushable_tasks, p->prio);
354         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
355
356         /* Update the highest prio pushable task */
357         if (p->prio < rq->rt.highest_prio.next)
358                 rq->rt.highest_prio.next = p->prio;
359 }
360
361 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
362 {
363         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
364
365         /* Update the new highest prio pushable task */
366         if (has_pushable_tasks(rq)) {
367                 p = plist_first_entry(&rq->rt.pushable_tasks,
368                                       struct task_struct, pushable_tasks);
369                 rq->rt.highest_prio.next = p->prio;
370         } else
371                 rq->rt.highest_prio.next = MAX_RT_PRIO;
372 }
373
374 #else
375
376 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
377 {
378 }
379
380 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
381 {
382 }
383
384 static inline
385 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
386 {
387 }
388
389 static inline
390 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
391 {
392 }
393
394 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
395 {
396         return false;
397 }
398
399 static inline int pull_rt_task(struct rq *this_rq)
400 {
401         return 0;
402 }
403
404 static inline void set_post_schedule(struct rq *rq)
405 {
406 }
407 #endif /* CONFIG_SMP */
408
409 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
410 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
411
412 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
413 {
414         return !list_empty(&rt_se->run_list);
415 }
416
417 #ifdef CONFIG_RT_GROUP_SCHED
418
419 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
420 {
421         if (!rt_rq->tg)
422                 return RUNTIME_INF;
423
424         return rt_rq->rt_runtime;
425 }
426
427 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
428 {
429         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
430 }
431
432 typedef struct task_group *rt_rq_iter_t;
433
434 static inline struct task_group *next_task_group(struct task_group *tg)
435 {
436         do {
437                 tg = list_entry_rcu(tg->list.next,
438                         typeof(struct task_group), list);
439         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
440
441         if (&tg->list == &task_groups)
442                 tg = NULL;
443
444         return tg;
445 }
446
447 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
448         for (iter = container_of(&task_groups, typeof(*iter), list);    \
449                 (iter = next_task_group(iter)) &&                       \
450                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
451
452 #define for_each_sched_rt_entity(rt_se) \
453         for (; rt_se; rt_se = rt_se->parent)
454
455 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
456 {
457         return rt_se->my_q;
458 }
459
460 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
461 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
462
463 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
464 {
465         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
466         struct sched_rt_entity *rt_se;
467
468         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
469
470         rt_se = rt_rq->tg->rt_se[cpu];
471
472         if (rt_rq->rt_nr_running) {
473                 if (!rt_se)
474                         enqueue_top_rt_rq(rt_rq);
475                 else if (!on_rt_rq(rt_se))
476                         enqueue_rt_entity(rt_se, false);
477
478                 if (rt_rq->highest_prio.curr < curr->prio)
479                         resched_task(curr);
480         }
481 }
482
483 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
484 {
485         struct sched_rt_entity *rt_se;
486         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
487
488         rt_se = rt_rq->tg->rt_se[cpu];
489
490         if (!rt_se)
491                 dequeue_top_rt_rq(rt_rq);
492         else if (on_rt_rq(rt_se))
493                 dequeue_rt_entity(rt_se);
494 }
495
496 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
497 {
498         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
499 }
500
501 static int rt_se_boosted(struct sched_rt_entity *rt_se)
502 {
503         struct rt_rq *rt_rq = group_rt_rq(rt_se);
504         struct task_struct *p;
505
506         if (rt_rq)
507                 return !!rt_rq->rt_nr_boosted;
508
509         p = rt_task_of(rt_se);
510         return p->prio != p->normal_prio;
511 }
512
513 #ifdef CONFIG_SMP
514 static inline const struct cpumask *sched_rt_period_mask(void)
515 {
516         return this_rq()->rd->span;
517 }
518 #else
519 static inline const struct cpumask *sched_rt_period_mask(void)
520 {
521         return cpu_online_mask;
522 }
523 #endif
524
525 static inline
526 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
527 {
528         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
529 }
530
531 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
532 {
533         return &rt_rq->tg->rt_bandwidth;
534 }
535
536 #else /* !CONFIG_RT_GROUP_SCHED */
537
538 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
539 {
540         return rt_rq->rt_runtime;
541 }
542
543 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
544 {
545         return ktime_to_ns(def_rt_bandwidth.rt_period);
546 }
547
548 typedef struct rt_rq *rt_rq_iter_t;
549
550 #define for_each_rt_rq(rt_rq, iter, rq) \
551         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
552
553 #define for_each_sched_rt_entity(rt_se) \
554         for (; rt_se; rt_se = NULL)
555
556 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
557 {
558         return NULL;
559 }
560
561 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
562 {
563         struct rq *rq = rq_of_rt_rq(rt_rq);
564
565         if (!rt_rq->rt_nr_running)
566                 return;
567
568         enqueue_top_rt_rq(rt_rq);
569         resched_task(rq->curr);
570 }
571
572 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
573 {
574         dequeue_top_rt_rq(rt_rq);
575 }
576
577 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
578 {
579         return rt_rq->rt_throttled;
580 }
581
582 static inline const struct cpumask *sched_rt_period_mask(void)
583 {
584         return cpu_online_mask;
585 }
586
587 static inline
588 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
589 {
590         return &cpu_rq(cpu)->rt;
591 }
592
593 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
594 {
595         return &def_rt_bandwidth;
596 }
597
598 #endif /* CONFIG_RT_GROUP_SCHED */
599
600 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
601 {
602         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
603
604         return (hrtimer_active(&rt_b->rt_period_timer) ||
605                 rt_rq->rt_time < rt_b->rt_runtime);
606 }
607
608 #ifdef CONFIG_SMP
609 /*
610  * We ran out of runtime; see if we can borrow some from our neighbours.
611  */
612 static int do_balance_runtime(struct rt_rq *rt_rq)
613 {
614         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
615         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
616         int i, weight, more = 0;
617         u64 rt_period;
618
619         weight = cpumask_weight(rd->span);
620
621         raw_spin_lock(&rt_b->rt_runtime_lock);
622         rt_period = ktime_to_ns(rt_b->rt_period);
623         for_each_cpu(i, rd->span) {
624                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
625                 s64 diff;
626
627                 if (iter == rt_rq)
628                         continue;
629
630                 raw_spin_lock(&iter->rt_runtime_lock);
631                 /*
632                  * Either all rqs have inf runtime and there's nothing to steal
633                  * or __disable_runtime() below sets a specific rq to inf to
634                  * indicate it's been disabled and disallow stealing.
635                  */
636                 if (iter->rt_runtime == RUNTIME_INF)
637                         goto next;
638
639                 /*
640                  * From runqueues with spare time, take 1/n part of their
641                  * spare time, but no more than our period.
642                  */
643                 diff = iter->rt_runtime - iter->rt_time;
644                 if (diff > 0) {
645                         diff = div_u64((u64)diff, weight);
646                         if (rt_rq->rt_runtime + diff > rt_period)
647                                 diff = rt_period - rt_rq->rt_runtime;
648                         iter->rt_runtime -= diff;
649                         rt_rq->rt_runtime += diff;
650                         more = 1;
651                         if (rt_rq->rt_runtime == rt_period) {
652                                 raw_spin_unlock(&iter->rt_runtime_lock);
653                                 break;
654                         }
655                 }
656 next:
657                 raw_spin_unlock(&iter->rt_runtime_lock);
658         }
659         raw_spin_unlock(&rt_b->rt_runtime_lock);
660
661         return more;
662 }
663
664 /*
665  * Ensure this RQ takes back all the runtime it lent to its neighbours.
666  */
667 static void __disable_runtime(struct rq *rq)
668 {
669         struct root_domain *rd = rq->rd;
670         rt_rq_iter_t iter;
671         struct rt_rq *rt_rq;
672
673         if (unlikely(!scheduler_running))
674                 return;
675
676         for_each_rt_rq(rt_rq, iter, rq) {
677                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
678                 s64 want;
679                 int i;
680
681                 raw_spin_lock(&rt_b->rt_runtime_lock);
682                 raw_spin_lock(&rt_rq->rt_runtime_lock);
683                 /*
684                  * Either we're all inf and nobody needs to borrow, or we're
685                  * already disabled and thus have nothing to do, or we have
686                  * exactly the right amount of runtime to take out.
687                  */
688                 if (rt_rq->rt_runtime == RUNTIME_INF ||
689                                 rt_rq->rt_runtime == rt_b->rt_runtime)
690                         goto balanced;
691                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
692
693                 /*
694                  * Calculate the difference between what we started out with
695                  * and what we currently have; that's the amount of runtime
696                  * we lent out and now have to reclaim.
697                  */
698                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
699
700                 /*
701                  * Greedy reclaim, take back as much as we can.
702                  */
703                 for_each_cpu(i, rd->span) {
704                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
705                         s64 diff;
706
707                         /*
708                          * Can't reclaim from ourselves or disabled runqueues.
709                          */
710                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
711                                 continue;
712
713                         raw_spin_lock(&iter->rt_runtime_lock);
714                         if (want > 0) {
715                                 diff = min_t(s64, iter->rt_runtime, want);
716                                 iter->rt_runtime -= diff;
717                                 want -= diff;
718                         } else {
719                                 iter->rt_runtime -= want;
720                                 want -= want;
721                         }
722                         raw_spin_unlock(&iter->rt_runtime_lock);
723
724                         if (!want)
725                                 break;
726                 }
727
728                 raw_spin_lock(&rt_rq->rt_runtime_lock);
729                 /*
730                  * We cannot be left wanting - that would mean some runtime
731                  * leaked out of the system.
732                  */
733                 BUG_ON(want);
734 balanced:
735                 /*
736                  * Disable all the borrow logic by pretending we have inf
737                  * runtime - in which case borrowing doesn't make sense.
738                  */
739                 rt_rq->rt_runtime = RUNTIME_INF;
740                 rt_rq->rt_throttled = 0;
741                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
742                 raw_spin_unlock(&rt_b->rt_runtime_lock);
743         }
744 }
745
746 static void __enable_runtime(struct rq *rq)
747 {
748         rt_rq_iter_t iter;
749         struct rt_rq *rt_rq;
750
751         if (unlikely(!scheduler_running))
752                 return;
753
754         /*
755          * Reset each runqueue's bandwidth settings
756          */
757         for_each_rt_rq(rt_rq, iter, rq) {
758                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
759
760                 raw_spin_lock(&rt_b->rt_runtime_lock);
761                 raw_spin_lock(&rt_rq->rt_runtime_lock);
762                 rt_rq->rt_runtime = rt_b->rt_runtime;
763                 rt_rq->rt_time = 0;
764                 rt_rq->rt_throttled = 0;
765                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
766                 raw_spin_unlock(&rt_b->rt_runtime_lock);
767         }
768 }
769
770 static int balance_runtime(struct rt_rq *rt_rq)
771 {
772         int more = 0;
773
774         if (!sched_feat(RT_RUNTIME_SHARE))
775                 return more;
776
777         if (rt_rq->rt_time > rt_rq->rt_runtime) {
778                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
779                 more = do_balance_runtime(rt_rq);
780                 raw_spin_lock(&rt_rq->rt_runtime_lock);
781         }
782
783         return more;
784 }
785 #else /* !CONFIG_SMP */
786 static inline int balance_runtime(struct rt_rq *rt_rq)
787 {
788         return 0;
789 }
790 #endif /* CONFIG_SMP */
791
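/*
 * Called from the bandwidth period timer: for each runqueue covered by
 * this bandwidth pool, pay back up to @overrun periods worth of consumed
 * runtime, unthrottle and re-enqueue it once it fits its budget again,
 * and report whether everything has gone idle.
 */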
792 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
793 {
794         int i, idle = 1, throttled = 0;
795         const struct cpumask *span;
796
797         span = sched_rt_period_mask();
798 #ifdef CONFIG_RT_GROUP_SCHED
799         /*
800          * FIXME: isolated CPUs should really leave the root task group,
801          * whether they are isolcpus or were isolated via cpusets, lest
802          * the timer run on a CPU which does not service all runqueues,
803          * potentially leaving other CPUs indefinitely throttled.  If
804          * isolation is really required, the user will turn the throttle
805          * off to kill the perturbations it causes anyway.  Meanwhile,
806          * this maintains functionality for boot and/or troubleshooting.
807          */
808         if (rt_b == &root_task_group.rt_bandwidth)
809                 span = cpu_online_mask;
810 #endif
811         for_each_cpu(i, span) {
812                 int enqueue = 0;
813                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
814                 struct rq *rq = rq_of_rt_rq(rt_rq);
815
816                 raw_spin_lock(&rq->lock);
817                 if (rt_rq->rt_time) {
818                         u64 runtime;
819
820                         raw_spin_lock(&rt_rq->rt_runtime_lock);
821                         if (rt_rq->rt_throttled)
822                                 balance_runtime(rt_rq);
823                         runtime = rt_rq->rt_runtime;
824                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
825                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
826                                 rt_rq->rt_throttled = 0;
827                                 enqueue = 1;
828
829                                 /*
830                                  * Force a clock update if the CPU was idle,
831                                  * lest wakeup -> unthrottle time accumulate.
832                                  */
833                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
834                                         rq->skip_clock_update = -1;
835                         }
836                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
837                                 idle = 0;
838                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
839                 } else if (rt_rq->rt_nr_running) {
840                         idle = 0;
841                         if (!rt_rq_throttled(rt_rq))
842                                 enqueue = 1;
843                 }
844                 if (rt_rq->rt_throttled)
845                         throttled = 1;
846
847                 if (enqueue)
848                         sched_rt_rq_enqueue(rt_rq);
849                 raw_spin_unlock(&rq->lock);
850         }
851
852         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
853                 return 1;
854
855         return idle;
856 }
857
858 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
859 {
860 #ifdef CONFIG_RT_GROUP_SCHED
861         struct rt_rq *rt_rq = group_rt_rq(rt_se);
862
863         if (rt_rq)
864                 return rt_rq->highest_prio.curr;
865 #endif
866
867         return rt_task_of(rt_se)->prio;
868 }
869
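/*
 * Check whether this rt_rq has run through its runtime budget. If so, and
 * borrowing from other CPUs could not cover the excess, mark it throttled
 * and dequeue it; a non-zero return tells the caller to reschedule.
 */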
870 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
871 {
872         u64 runtime = sched_rt_runtime(rt_rq);
873
874         if (rt_rq->rt_throttled)
875                 return rt_rq_throttled(rt_rq);
876
877         if (runtime >= sched_rt_period(rt_rq))
878                 return 0;
879
880         balance_runtime(rt_rq);
881         runtime = sched_rt_runtime(rt_rq);
882         if (runtime == RUNTIME_INF)
883                 return 0;
884
885         if (rt_rq->rt_time > runtime) {
886                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
887
888                 /*
889                  * Don't actually throttle groups that have no runtime assigned
890                  * but accrue some time due to boosting.
891                  */
892                 if (likely(rt_b->rt_runtime)) {
893                         rt_rq->rt_throttled = 1;
894                         printk_deferred_once("sched: RT throttling activated\n");
895                 } else {
896                         /*
897                          * In case we did anyway, make it go away,
898                          * replenishment is a joke, since it will replenish us
899                          * with exactly 0 ns.
900                          */
901                         rt_rq->rt_time = 0;
902                 }
903
904                 if (rt_rq_throttled(rt_rq)) {
905                         sched_rt_rq_dequeue(rt_rq);
906                         return 1;
907                 }
908         }
909
910         return 0;
911 }
912
913 /*
914  * Update the current task's runtime statistics. Skip current tasks that
915  * are not in our scheduling class.
916  */
917 static void update_curr_rt(struct rq *rq)
918 {
919         struct task_struct *curr = rq->curr;
920         struct sched_rt_entity *rt_se = &curr->rt;
921         u64 delta_exec;
922
923         if (curr->sched_class != &rt_sched_class)
924                 return;
925
926         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
927         if (unlikely((s64)delta_exec <= 0))
928                 return;
929
930         schedstat_set(curr->se.statistics.exec_max,
931                       max(curr->se.statistics.exec_max, delta_exec));
932
933         curr->se.sum_exec_runtime += delta_exec;
934         account_group_exec_runtime(curr, delta_exec);
935
936         curr->se.exec_start = rq_clock_task(rq);
937         cpuacct_charge(curr, delta_exec);
938
939         sched_rt_avg_update(rq, delta_exec);
940
941         if (!rt_bandwidth_enabled())
942                 return;
943
944         for_each_sched_rt_entity(rt_se) {
945                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
946
947                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
948                         raw_spin_lock(&rt_rq->rt_runtime_lock);
949                         rt_rq->rt_time += delta_exec;
950                         if (sched_rt_runtime_exceeded(rt_rq))
951                                 resched_task(curr);
952                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
953                 }
954         }
955 }
956
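/*
 * The root rt_rq contributes its rt_nr_running to the rq's global
 * nr_running only while it is queued; these two helpers remove and add
 * that contribution when the RT class as a whole leaves or joins the
 * runqueue.
 */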
957 static void
958 dequeue_top_rt_rq(struct rt_rq *rt_rq)
959 {
960         struct rq *rq = rq_of_rt_rq(rt_rq);
961
962         BUG_ON(&rq->rt != rt_rq);
963
964         if (!rt_rq->rt_queued)
965                 return;
966
967         BUG_ON(!rq->nr_running);
968
969         sub_nr_running(rq, rt_rq->rt_nr_running);
970         rt_rq->rt_queued = 0;
971 }
972
973 static void
974 enqueue_top_rt_rq(struct rt_rq *rt_rq)
975 {
976         struct rq *rq = rq_of_rt_rq(rt_rq);
977
978         BUG_ON(&rq->rt != rt_rq);
979
980         if (rt_rq->rt_queued)
981                 return;
982         if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
983                 return;
984
985         add_nr_running(rq, rt_rq->rt_nr_running);
986         rt_rq->rt_queued = 1;
987 }
988
989 #if defined CONFIG_SMP
990
991 static void
992 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
993 {
994         struct rq *rq = rq_of_rt_rq(rt_rq);
995
996 #ifdef CONFIG_RT_GROUP_SCHED
997         /*
998          * Change rq's cpupri only if rt_rq is the top queue.
999          */
1000         if (&rq->rt != rt_rq)
1001                 return;
1002 #endif
1003         if (rq->online && prio < prev_prio)
1004                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1005 }
1006
1007 static void
1008 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1009 {
1010         struct rq *rq = rq_of_rt_rq(rt_rq);
1011
1012 #ifdef CONFIG_RT_GROUP_SCHED
1013         /*
1014          * Change rq's cpupri only if rt_rq is the top queue.
1015          */
1016         if (&rq->rt != rt_rq)
1017                 return;
1018 #endif
1019         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1020                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1021 }
1022
1023 #else /* CONFIG_SMP */
1024
1025 static inline
1026 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1027 static inline
1028 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1029
1030 #endif /* CONFIG_SMP */
1031
1032 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1033 static void
1034 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1035 {
1036         int prev_prio = rt_rq->highest_prio.curr;
1037
1038         if (prio < prev_prio)
1039                 rt_rq->highest_prio.curr = prio;
1040
1041         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1042 }
1043
1044 static void
1045 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1046 {
1047         int prev_prio = rt_rq->highest_prio.curr;
1048
1049         if (rt_rq->rt_nr_running) {
1050
1051                 WARN_ON(prio < prev_prio);
1052
1053                 /*
1054                  * This may have been our highest task, and therefore
1055                  * we may have some recomputation to do
1056                  */
1057                 if (prio == prev_prio) {
1058                         struct rt_prio_array *array = &rt_rq->active;
1059
1060                         rt_rq->highest_prio.curr =
1061                                 sched_find_first_bit(array->bitmap);
1062                 }
1063
1064         } else
1065                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1066
1067         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1068 }
1069
1070 #else
1071
1072 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1073 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1074
1075 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1076
1077 #ifdef CONFIG_RT_GROUP_SCHED
1078
1079 static void
1080 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1081 {
1082         if (rt_se_boosted(rt_se))
1083                 rt_rq->rt_nr_boosted++;
1084
1085         if (rt_rq->tg)
1086                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1087 }
1088
1089 static void
1090 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1091 {
1092         if (rt_se_boosted(rt_se))
1093                 rt_rq->rt_nr_boosted--;
1094
1095         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1096 }
1097
1098 #else /* CONFIG_RT_GROUP_SCHED */
1099
1100 static void
1101 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1102 {
1103         start_rt_bandwidth(&def_rt_bandwidth);
1104 }
1105
1106 static inline
1107 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1108
1109 #endif /* CONFIG_RT_GROUP_SCHED */
1110
1111 static inline
1112 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1113 {
1114         struct rt_rq *group_rq = group_rt_rq(rt_se);
1115
1116         if (group_rq)
1117                 return group_rq->rt_nr_running;
1118         else
1119                 return 1;
1120 }
1121
1122 static inline
1123 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1124 {
1125         int prio = rt_se_prio(rt_se);
1126
1127         WARN_ON(!rt_prio(prio));
1128         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1129
1130         inc_rt_prio(rt_rq, prio);
1131         inc_rt_migration(rt_se, rt_rq);
1132         inc_rt_group(rt_se, rt_rq);
1133 }
1134
1135 static inline
1136 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1137 {
1138         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1139         WARN_ON(!rt_rq->rt_nr_running);
1140         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1141
1142         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1143         dec_rt_migration(rt_se, rt_rq);
1144         dec_rt_group(rt_se, rt_rq);
1145 }
1146
1147 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1148 {
1149         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1150         struct rt_prio_array *array = &rt_rq->active;
1151         struct rt_rq *group_rq = group_rt_rq(rt_se);
1152         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1153
1154         /*
1155          * Don't enqueue the group if it's throttled, or when empty.
1156          * The latter is a consequence of the former when a child group
1157          * gets throttled and the current group doesn't have any other
1158          * active members.
1159          */
1160         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1161                 return;
1162
1163         if (head)
1164                 list_add(&rt_se->run_list, queue);
1165         else
1166                 list_add_tail(&rt_se->run_list, queue);
1167         __set_bit(rt_se_prio(rt_se), array->bitmap);
1168
1169         inc_rt_tasks(rt_se, rt_rq);
1170 }
1171
1172 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1173 {
1174         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1175         struct rt_prio_array *array = &rt_rq->active;
1176
1177         list_del_init(&rt_se->run_list);
1178         if (list_empty(array->queue + rt_se_prio(rt_se)))
1179                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1180
1181         dec_rt_tasks(rt_se, rt_rq);
1182 }
1183
1184 /*
1185  * Because the prio of an upper entry depends on the lower
1186  * entries, we must remove entries top-down.
1187  */
1188 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1189 {
1190         struct sched_rt_entity *back = NULL;
1191
1192         for_each_sched_rt_entity(rt_se) {
1193                 rt_se->back = back;
1194                 back = rt_se;
1195         }
1196
1197         dequeue_top_rt_rq(rt_rq_of_se(back));
1198
1199         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1200                 if (on_rt_rq(rt_se))
1201                         __dequeue_rt_entity(rt_se);
1202         }
1203 }
1204
1205 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1206 {
1207         struct rq *rq = rq_of_rt_se(rt_se);
1208
1209         dequeue_rt_stack(rt_se);
1210         for_each_sched_rt_entity(rt_se)
1211                 __enqueue_rt_entity(rt_se, head);
1212         enqueue_top_rt_rq(&rq->rt);
1213 }
1214
1215 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1216 {
1217         struct rq *rq = rq_of_rt_se(rt_se);
1218
1219         dequeue_rt_stack(rt_se);
1220
1221         for_each_sched_rt_entity(rt_se) {
1222                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1223
1224                 if (rt_rq && rt_rq->rt_nr_running)
1225                         __enqueue_rt_entity(rt_se, false);
1226         }
1227         enqueue_top_rt_rq(&rq->rt);
1228 }
1229
1230 /*
1231  * Adding/removing a task to/from a priority array:
1232  */
1233 static void
1234 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1235 {
1236         struct sched_rt_entity *rt_se = &p->rt;
1237
1238         if (flags & ENQUEUE_WAKEUP)
1239                 rt_se->timeout = 0;
1240
1241         enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1242
1243         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1244                 enqueue_pushable_task(rq, p);
1245 }
1246
1247 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1248 {
1249         struct sched_rt_entity *rt_se = &p->rt;
1250
1251         update_curr_rt(rq);
1252         dequeue_rt_entity(rt_se);
1253
1254         dequeue_pushable_task(rq, p);
1255 }
1256
1257 /*
1258  * Put the task at the head or the end of the run list without the overhead
1259  * of a dequeue followed by an enqueue.
1260  */
1261 static void
1262 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1263 {
1264         if (on_rt_rq(rt_se)) {
1265                 struct rt_prio_array *array = &rt_rq->active;
1266                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1267
1268                 if (head)
1269                         list_move(&rt_se->run_list, queue);
1270                 else
1271                         list_move_tail(&rt_se->run_list, queue);
1272         }
1273 }
1274
1275 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1276 {
1277         struct sched_rt_entity *rt_se = &p->rt;
1278         struct rt_rq *rt_rq;
1279
1280         for_each_sched_rt_entity(rt_se) {
1281                 rt_rq = rt_rq_of_se(rt_se);
1282                 requeue_rt_entity(rt_rq, rt_se, head);
1283         }
1284 }
1285
1286 static void yield_task_rt(struct rq *rq)
1287 {
1288         requeue_task_rt(rq, rq->curr, 0);
1289 }
1290
1291 #ifdef CONFIG_SMP
1292 static int find_lowest_rq(struct task_struct *task);
1293
1294 static int
1295 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1296 {
1297         struct task_struct *curr;
1298         struct rq *rq;
1299
1300         if (p->nr_cpus_allowed == 1)
1301                 goto out;
1302
1303         /* For anything but wake ups, just return the task_cpu */
1304         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1305                 goto out;
1306
1307         rq = cpu_rq(cpu);
1308
1309         rcu_read_lock();
1310         curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1311
1312         /*
1313          * If the current task on @p's runqueue is an RT task, then
1314          * try to see if we can wake this RT task up on another
1315          * runqueue. Otherwise simply start this RT task
1316          * on its current runqueue.
1317          *
1318          * We want to avoid overloading runqueues. If the woken
1319          * task has a higher priority, then it will stay on this CPU
1320          * and the lower prio task should be moved to another CPU.
1321          * Even though this will probably make the lower prio task
1322          * lose its cache, we do not want to bounce a higher task
1323          * around just because it gave up its CPU, perhaps for a
1324          * lock?
1325          *
1326          * For equal prio tasks, we just let the scheduler sort it out.
1327          *
1328          * Otherwise, just let it ride on the affined RQ and the
1329          * post-schedule router will push the preempted task away
1330          *
1331          * This test is optimistic; if we get it wrong the load-balancer
1332          * will have to sort it out.
1333          */
1334         if (curr && unlikely(rt_task(curr)) &&
1335             (curr->nr_cpus_allowed < 2 ||
1336              curr->prio <= p->prio)) {
1337                 int target = find_lowest_rq(p);
1338
1339                 if (target != -1)
1340                         cpu = target;
1341         }
1342         rcu_read_unlock();
1343
1344 out:
1345         return cpu;
1346 }
1347
1348 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1349 {
1350         if (rq->curr->nr_cpus_allowed == 1)
1351                 return;
1352
1353         if (p->nr_cpus_allowed != 1
1354             && cpupri_find(&rq->rd->cpupri, p, NULL))
1355                 return;
1356
1357         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1358                 return;
1359
1360         /*
1361          * There appear to be other cpus that can accept
1362          * current and none to run 'p', so let's reschedule
1363          * to try and push current away:
1364          */
1365         requeue_task_rt(rq, p, 1);
1366         resched_task(rq->curr);
1367 }
1368
1369 #endif /* CONFIG_SMP */
1370
1371 /*
1372  * Preempt the current task with a newly woken task if needed:
1373  */
1374 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1375 {
1376         if (p->prio < rq->curr->prio) {
1377                 resched_task(rq->curr);
1378                 return;
1379         }
1380
1381 #ifdef CONFIG_SMP
1382         /*
1383          * If:
1384          *
1385          * - the newly woken task is of equal priority to the current task
1386          * - the newly woken task is non-migratable while current is migratable
1387          * - current will be preempted on the next reschedule
1388          *
1389          * we should check to see if current can readily move to a different
1390          * cpu.  If so, we will reschedule to allow the push logic to try
1391          * to move current somewhere else, making room for our non-migratable
1392          * task.
1393          */
1394         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1395                 check_preempt_equal_prio(rq, p);
1396 #endif
1397 }
1398
1399 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1400                                                    struct rt_rq *rt_rq)
1401 {
1402         struct rt_prio_array *array = &rt_rq->active;
1403         struct sched_rt_entity *next = NULL;
1404         struct list_head *queue;
1405         int idx;
1406
1407         idx = sched_find_first_bit(array->bitmap);
1408         BUG_ON(idx >= MAX_RT_PRIO);
1409
1410         queue = array->queue + idx;
1411         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1412
1413         return next;
1414 }
1415
1416 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1417 {
1418         struct sched_rt_entity *rt_se;
1419         struct task_struct *p;
1420         struct rt_rq *rt_rq  = &rq->rt;
1421
1422         do {
1423                 rt_se = pick_next_rt_entity(rq, rt_rq);
1424                 BUG_ON(!rt_se);
1425                 rt_rq = group_rt_rq(rt_se);
1426         } while (rt_rq);
1427
1428         p = rt_task_of(rt_se);
1429         p->se.exec_start = rq_clock_task(rq);
1430
1431         return p;
1432 }
1433
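/*
 * Pick the next RT task to run. If this runqueue's best RT priority is
 * lower than the previous task's, first try to pull higher-priority RT
 * tasks from other CPUs; a dl or stop task slipping in while the lock is
 * dropped forces task selection to be restarted.
 */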
1434 static struct task_struct *
1435 pick_next_task_rt(struct rq *rq, struct task_struct *prev)
1436 {
1437         struct task_struct *p;
1438         struct rt_rq *rt_rq = &rq->rt;
1439
1440         if (need_pull_rt_task(rq, prev)) {
1441                 pull_rt_task(rq);
1442                 /*
1443                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
1444                  * means a dl or stop task can slip in, in which case we need
1445                  * to re-start task selection.
1446                  */
1447                 if (unlikely((rq->stop && rq->stop->on_rq) ||
1448                              rq->dl.dl_nr_running))
1449                         return RETRY_TASK;
1450         }
1451
1452         /*
1453          * We may dequeue prev's rt_rq in put_prev_task().
1454          * So, we update time before rt_nr_running check.
1455          */
1456         if (prev->sched_class == &rt_sched_class)
1457                 update_curr_rt(rq);
1458
1459         if (!rt_rq->rt_queued)
1460                 return NULL;
1461
1462         put_prev_task(rq, prev);
1463
1464         p = _pick_next_task_rt(rq);
1465
1466         /* The running task is never eligible for pushing */
1467         if (p)
1468                 dequeue_pushable_task(rq, p);
1469
1470         set_post_schedule(rq);
1471
1472         return p;
1473 }
1474
1475 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1476 {
1477         update_curr_rt(rq);
1478
1479         /*
1480          * The previous task needs to be made eligible for pushing
1481          * if it is still active
1482          */
1483         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1484                 enqueue_pushable_task(rq, p);
1485 }
1486
1487 #ifdef CONFIG_SMP
1488
1489 /* Only try algorithms three times */
1490 #define RT_MAX_TRIES 3
1491
1492 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1493 {
1494         if (!task_running(rq, p) &&
1495             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1496                 return 1;
1497         return 0;
1498 }
1499
1500 /*
1501  * Return the highest-priority pushable task on this rq that is suitable to
1502  * be executed on the given cpu, or NULL otherwise.
1503  */
1504 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1505 {
1506         struct plist_head *head = &rq->rt.pushable_tasks;
1507         struct task_struct *p;
1508
1509         if (!has_pushable_tasks(rq))
1510                 return NULL;
1511
1512         plist_for_each_entry(p, head, pushable_tasks) {
1513                 if (pick_rt_task(rq, p, cpu))
1514                         return p;
1515         }
1516
1517         return NULL;
1518 }
1519
1520 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1521
1522 static int find_lowest_rq(struct task_struct *task)
1523 {
1524         struct sched_domain *sd;
1525         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1526         int this_cpu = smp_processor_id();
1527         int cpu      = task_cpu(task);
1528
1529         /* Make sure the mask is initialized first */
1530         if (unlikely(!lowest_mask))
1531                 return -1;
1532
1533         if (task->nr_cpus_allowed == 1)
1534                 return -1; /* No other targets possible */
1535
1536         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1537                 return -1; /* No targets found */
1538
1539         /*
1540          * At this point we have built a mask of cpus representing the
1541          * lowest priority tasks in the system.  Now we want to elect
1542          * the best one based on our affinity and topology.
1543          *
1544          * We prioritize the last cpu that the task executed on since
1545          * it is most likely cache-hot in that location.
1546          */
1547         if (cpumask_test_cpu(cpu, lowest_mask))
1548                 return cpu;
1549
1550         /*
1551          * Otherwise, we consult the sched_domains span maps to figure
1552          * out which cpu is logically closest to our hot cache data.
1553          */
1554         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1555                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1556
1557         rcu_read_lock();
1558         for_each_domain(cpu, sd) {
1559                 if (sd->flags & SD_WAKE_AFFINE) {
1560                         int best_cpu;
1561
1562                         /*
1563                          * "this_cpu" is cheaper to preempt than a
1564                          * remote processor.
1565                          */
1566                         if (this_cpu != -1 &&
1567                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1568                                 rcu_read_unlock();
1569                                 return this_cpu;
1570                         }
1571
1572                         best_cpu = cpumask_first_and(lowest_mask,
1573                                                      sched_domain_span(sd));
1574                         if (best_cpu < nr_cpu_ids) {
1575                                 rcu_read_unlock();
1576                                 return best_cpu;
1577                         }
1578                 }
1579         }
1580         rcu_read_unlock();
1581
1582         /*
1583          * And finally, if there were no matches within the domains
1584          * just give the caller *something* to work with from the compatible
1585          * locations.
1586          */
1587         if (this_cpu != -1)
1588                 return this_cpu;
1589
1590         cpu = cpumask_any(lowest_mask);
1591         if (cpu < nr_cpu_ids)
1592                 return cpu;
1593         return -1;
1594 }
1595
1596 /* Will lock the rq it finds */
1597 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1598 {
1599         struct rq *lowest_rq = NULL;
1600         int tries;
1601         int cpu;
1602
1603         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1604                 cpu = find_lowest_rq(task);
1605
1606                 if ((cpu == -1) || (cpu == rq->cpu))
1607                         break;
1608
1609                 lowest_rq = cpu_rq(cpu);
1610
1611                 /* if the prio of this runqueue changed, try again */
1612                 if (double_lock_balance(rq, lowest_rq)) {
1613                         /*
1614                          * We had to unlock the run queue. In
1615                          * the meantime, the task could have
1616                          * migrated already or had its affinity changed.
1617                          * Also make sure that it wasn't scheduled on its rq.
1618                          */
1619                         if (unlikely(task_rq(task) != rq ||
1620                                      !cpumask_test_cpu(lowest_rq->cpu,
1621                                                        tsk_cpus_allowed(task)) ||
1622                                      task_running(rq, task) ||
1623                                      !task->on_rq)) {
1624
1625                                 double_unlock_balance(rq, lowest_rq);
1626                                 lowest_rq = NULL;
1627                                 break;
1628                         }
1629                 }
1630
1631                 /* If this rq is still suitable use it. */
1632                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1633                         break;
1634
1635                 /* try again */
1636                 double_unlock_balance(rq, lowest_rq);
1637                 lowest_rq = NULL;
1638         }
1639
1640         return lowest_rq;
1641 }
1642
1643 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1644 {
1645         struct task_struct *p;
1646
1647         if (!has_pushable_tasks(rq))
1648                 return NULL;
1649
1650         p = plist_first_entry(&rq->rt.pushable_tasks,
1651                               struct task_struct, pushable_tasks);
1652
1653         BUG_ON(rq->cpu != task_cpu(p));
1654         BUG_ON(task_current(rq, p));
1655         BUG_ON(p->nr_cpus_allowed <= 1);
1656
1657         BUG_ON(!p->on_rq);
1658         BUG_ON(!rt_task(p));
1659
1660         return p;
1661 }
1662
1663 /*
1664  * If the current CPU has more than one RT task, see if the non-running
1665  * task can migrate over to a CPU that is running a task of lesser
1666  * priority.
1667  */
1668 static int push_rt_task(struct rq *rq)
1669 {
1670         struct task_struct *next_task;
1671         struct rq *lowest_rq;
1672         int ret = 0;
1673
1674         if (!rq->rt.overloaded)
1675                 return 0;
1676
1677         next_task = pick_next_pushable_task(rq);
1678         if (!next_task)
1679                 return 0;
1680
1681 retry:
1682         if (unlikely(next_task == rq->curr)) {
1683                 WARN_ON(1);
1684                 return 0;
1685         }
1686
1687         /*
1688          * It's possible that the next_task slipped in with a
1689          * higher priority than current. If that's the case,
1690          * just reschedule current.
1691          */
1692         if (unlikely(next_task->prio < rq->curr->prio)) {
1693                 resched_task(rq->curr);
1694                 return 0;
1695         }
1696
1697         /* We might release rq lock */
1698         get_task_struct(next_task);
1699
1700         /* find_lock_lowest_rq locks the rq if found */
1701         lowest_rq = find_lock_lowest_rq(next_task, rq);
1702         if (!lowest_rq) {
1703                 struct task_struct *task;
1704                 /*
1705                  * find_lock_lowest_rq releases rq->lock
1706                  * so it is possible that next_task has migrated.
1707                  *
1708                  * We need to make sure that the task is still on the same
1709                  * run-queue and is also still the next task eligible for
1710                  * pushing.
1711                  */
1712                 task = pick_next_pushable_task(rq);
1713                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1714                         /*
1715                          * The task hasn't migrated, and is still the next
1716                          * eligible task, but we failed to find a run-queue
1717                          * to push it to.  Do not retry in this case, since
1718                          * other cpus will pull from us when ready.
1719                          */
1720                         goto out;
1721                 }
1722
1723                 if (!task)
1724                         /* No more tasks, just exit */
1725                         goto out;
1726
1727                 /*
1728                  * Something has shifted, try again.
1729                  */
1730                 put_task_struct(next_task);
1731                 next_task = task;
1732                 goto retry;
1733         }
1734
1735         deactivate_task(rq, next_task, 0);
1736         set_task_cpu(next_task, lowest_rq->cpu);
1737         activate_task(lowest_rq, next_task, 0);
1738         ret = 1;
1739
1740         resched_task(lowest_rq->curr);
1741
1742         double_unlock_balance(rq, lowest_rq);
1743
1744 out:
1745         put_task_struct(next_task);
1746
1747         return ret;
1748 }
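
/*
 * Illustrative sketch, not part of rt.c: the push/pull decisions in this
 * file compare kernel-internal ->prio values, where a smaller number means
 * a higher priority.  Assuming the usual mapping of a user-visible
 * rt_priority (1..99, as passed to sched_setscheduler()) to
 * ->prio == MAX_RT_PRIO - 1 - rt_priority with MAX_RT_PRIO == 100, a tiny
 * user-space program makes the direction of those comparisons concrete.
 * Every "example_" name below is made up for this sketch.
 */
#include <stdio.h>

#define EXAMPLE_MAX_RT_PRIO	100

static int example_kernel_prio(int rt_priority)
{
        return EXAMPLE_MAX_RT_PRIO - 1 - rt_priority;
}

int main(void)
{
        /* rt_priority 99 is the most urgent RT level: ->prio 0 */
        printf("rt_priority 99 -> prio %d\n", example_kernel_prio(99));
        /* rt_priority 1 is the least urgent RT level: ->prio 98 */
        printf("rt_priority  1 -> prio %d\n", example_kernel_prio(1));
        /* so "next_task->prio < rq->curr->prio" means next_task is more urgent */
        return 0;
}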
1749
1750 static void push_rt_tasks(struct rq *rq)
1751 {
1752         /* push_rt_task() will return true if it moved an RT task */
1753         while (push_rt_task(rq))
1754                 ;
1755 }
1756
1757 static int pull_rt_task(struct rq *this_rq)
1758 {
1759         int this_cpu = this_rq->cpu, ret = 0, cpu;
1760         struct task_struct *p;
1761         struct rq *src_rq;
1762
1763         if (likely(!rt_overloaded(this_rq)))
1764                 return 0;
1765
1766         /*
1767          * Match the barrier from rt_set_overload(); this guarantees that if we
1768          * see overloaded we must also see the rto_mask bit.
1769          */
1770         smp_rmb();
1771
1772         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1773                 if (this_cpu == cpu)
1774                         continue;
1775
1776                 src_rq = cpu_rq(cpu);
1777
1778                 /*
1779                  * Don't bother taking the src_rq->lock if the next highest
1780                  * task is known to be lower-priority than our current task.
1781                  * This may look racy, but if this value is about to go
1782                  * logically higher, the src_rq will push this task away.
1783                  * And if it's going logically lower, we do not care.
1784                  */
1785                 if (src_rq->rt.highest_prio.next >=
1786                     this_rq->rt.highest_prio.curr)
1787                         continue;
1788
1789                 /*
1790                  * We can potentially drop this_rq's lock in
1791                  * double_lock_balance, and another CPU could
1792                  * alter this_rq
1793                  */
1794                 double_lock_balance(this_rq, src_rq);
1795
1796                 /*
1797                  * We can only pull a task that is pushable
1798                  * on its rq, and no others.
1799                  */
1800                 p = pick_highest_pushable_task(src_rq, this_cpu);
1801
1802                 /*
1803                  * Do we have an RT task that preempts
1804                  * the to-be-scheduled task?
1805                  */
1806                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1807                         WARN_ON(p == src_rq->curr);
1808                         WARN_ON(!p->on_rq);
1809
1810                         /*
1811                          * There's a chance that p is higher in priority
1812                          * than what's currently running on its cpu.
1813                          * This is just that p is waking up and hasn't
1814                          * had a chance to schedule. We only pull
1815                          * p if it is lower in priority than the
1816                          * current task on the run queue.
1817                          */
1818                         if (p->prio < src_rq->curr->prio)
1819                                 goto skip;
1820
1821                         ret = 1;
1822
1823                         deactivate_task(src_rq, p, 0);
1824                         set_task_cpu(p, this_cpu);
1825                         activate_task(this_rq, p, 0);
1826                         /*
1827                          * We continue with the search, just in
1828                          * case there's an even higher prio task
1829                          * in another runqueue. (low likelihood
1830                          * but possible)
1831                          */
1832                 }
1833 skip:
1834                 double_unlock_balance(this_rq, src_rq);
1835         }
1836
1837         return ret;
1838 }
1839
1840 static void post_schedule_rt(struct rq *rq)
1841 {
1842         push_rt_tasks(rq);
1843 }
1844
1845 /*
1846  * If we are not running and we are not going to reschedule soon, we should
1847  * try to push tasks away now.
1848  */
1849 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1850 {
1851         if (!task_running(rq, p) &&
1852             !test_tsk_need_resched(rq->curr) &&
1853             has_pushable_tasks(rq) &&
1854             p->nr_cpus_allowed > 1 &&
1855             (dl_task(rq->curr) || rt_task(rq->curr)) &&
1856             (rq->curr->nr_cpus_allowed < 2 ||
1857              rq->curr->prio <= p->prio))
1858                 push_rt_tasks(rq);
1859 }
1860
1861 static void set_cpus_allowed_rt(struct task_struct *p,
1862                                 const struct cpumask *new_mask)
1863 {
1864         struct rq *rq;
1865         int weight;
1866
1867         BUG_ON(!rt_task(p));
1868
1869         if (!p->on_rq)
1870                 return;
1871
1872         weight = cpumask_weight(new_mask);
1873
1874         /*
1875          * Only update if the process changes whether it can
1876          * migrate or not.
1877          */
1878         if ((p->nr_cpus_allowed > 1) == (weight > 1))
1879                 return;
1880
1881         rq = task_rq(p);
1882
1883         /*
1884          * The process used to be able to migrate OR it can now migrate
1885          */
1886         if (weight <= 1) {
1887                 if (!task_current(rq, p))
1888                         dequeue_pushable_task(rq, p);
1889                 BUG_ON(!rq->rt.rt_nr_migratory);
1890                 rq->rt.rt_nr_migratory--;
1891         } else {
1892                 if (!task_current(rq, p))
1893                         enqueue_pushable_task(rq, p);
1894                 rq->rt.rt_nr_migratory++;
1895         }
1896
1897         update_rt_migration(&rq->rt);
1898 }
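
/*
 * Illustrative sketch, not part of rt.c: set_cpus_allowed_rt() is the
 * RT-class hook invoked when a task's affinity mask changes, for instance
 * via the sched_setaffinity() system call.  A minimal user-space trigger
 * for an RT task might look like the following; the CPU number and the
 * priority are arbitrary example values.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };
        cpu_set_t mask;

        if (sched_setscheduler(0, SCHED_FIFO, &sp))
                perror("sched_setscheduler");   /* needs CAP_SYS_NICE */

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);      /* pin to CPU 0: the task becomes non-migratory */
        if (sched_setaffinity(0, sizeof(mask), &mask))
                perror("sched_setaffinity");

        return 0;
}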
1899
1900 /* Assumes rq->lock is held */
1901 static void rq_online_rt(struct rq *rq)
1902 {
1903         if (rq->rt.overloaded)
1904                 rt_set_overload(rq);
1905
1906         __enable_runtime(rq);
1907
1908         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1909 }
1910
1911 /* Assumes rq->lock is held */
1912 static void rq_offline_rt(struct rq *rq)
1913 {
1914         if (rq->rt.overloaded)
1915                 rt_clear_overload(rq);
1916
1917         __disable_runtime(rq);
1918
1919         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1920 }
1921
1922 /*
1923  * When switching from the rt queue, we bring ourselves to a position
1924  * where we might want to pull RT tasks from other runqueues.
1925  */
1926 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1927 {
1928         /*
1929          * If there are other RT tasks then we will reschedule
1930          * and the scheduling of the other RT tasks will handle
1931          * the balancing. But if we are the last RT task
1932          * we may need to handle the pulling of RT tasks
1933          * now.
1934          */
1935         if (!p->on_rq || rq->rt.rt_nr_running)
1936                 return;
1937
1938         if (pull_rt_task(rq))
1939                 resched_task(rq->curr);
1940 }
1941
1942 void __init init_sched_rt_class(void)
1943 {
1944         unsigned int i;
1945
1946         for_each_possible_cpu(i) {
1947                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1948                                         GFP_KERNEL, cpu_to_node(i));
1949         }
1950 }
1951 #endif /* CONFIG_SMP */
1952
1953 /*
1954  * When switching a task to RT, we may overload the runqueue
1955  * with RT tasks. In this case we try to push them off to
1956  * other runqueues.
1957  */
1958 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1959 {
1960         int check_resched = 1;
1961
1962         /*
1963          * If we are already running, then there's nothing
1964          * that needs to be done. But if we are not running
1965          * we may need to preempt the currently running task.
1966          * If that currently running task is also an RT task,
1967          * then see if we can move to another run queue.
1968          */
1969         if (p->on_rq && rq->curr != p) {
1970 #ifdef CONFIG_SMP
1971                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
1972                     /* Don't resched if we changed runqueues */
1973                     push_rt_task(rq) && rq != task_rq(p))
1974                         check_resched = 0;
1975 #endif /* CONFIG_SMP */
1976                 if (check_resched && p->prio < rq->curr->prio)
1977                         resched_task(rq->curr);
1978         }
1979 }
1980
1981 /*
1982  * Priority of the task has changed. This may cause
1983  * us to initiate a push or pull.
1984  */
1985 static void
1986 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1987 {
1988         if (!p->on_rq)
1989                 return;
1990
1991         if (rq->curr == p) {
1992 #ifdef CONFIG_SMP
1993                 /*
1994                  * If our priority decreases while running, we
1995                  * may need to pull tasks to this runqueue.
1996                  */
1997                 if (oldprio < p->prio)
1998                         pull_rt_task(rq);
1999                 /*
2000                  * If there's a higher priority task waiting to run
2001                  * then reschedule. Note, the above pull_rt_task
2002                  * can release the rq lock and p could migrate.
2003                  * Only reschedule if p is still on the same runqueue.
2004                  */
2005                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
2006                         resched_task(p);
2007 #else
2008                 /* For UP simply resched on drop of prio */
2009                 if (oldprio < p->prio)
2010                         resched_task(p);
2011 #endif /* CONFIG_SMP */
2012         } else {
2013                 /*
2014                  * This task is not running, but if its priority
2015                  * is higher than that of the currently running
2016                  * task, then reschedule.
2017                  */
2018                 if (p->prio < rq->curr->prio)
2019                         resched_task(rq->curr);
2020         }
2021 }
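
/*
 * Illustrative sketch, not part of rt.c: switched_to_rt(), prio_changed_rt()
 * and switched_from_rt() are driven by ordinary scheduler system calls.
 * A plausible user-space sequence exercising all three might look like
 * this; the priority values are arbitrary examples.
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 50 };

        /* entering the RT class goes through switched_to_rt() */
        if (sched_setscheduler(0, SCHED_RR, &sp))
                perror("sched_setscheduler(SCHED_RR)"); /* needs CAP_SYS_NICE */

        /* changing the priority of an RT task goes through prio_changed_rt() */
        sp.sched_priority = 60;
        if (sched_setparam(0, &sp))
                perror("sched_setparam");

        /* dropping back to SCHED_OTHER goes through switched_from_rt() */
        sp.sched_priority = 0;
        if (sched_setscheduler(0, SCHED_OTHER, &sp))
                perror("sched_setscheduler(SCHED_OTHER)");

        return 0;
}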
2022
2023 static void watchdog(struct rq *rq, struct task_struct *p)
2024 {
2025         unsigned long soft, hard;
2026
2027         /* max may change after cur was read; this will be fixed on the next tick */
2028         soft = task_rlimit(p, RLIMIT_RTTIME);
2029         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2030
2031         if (soft != RLIM_INFINITY) {
2032                 unsigned long next;
2033
2034                 if (p->rt.watchdog_stamp != jiffies) {
2035                         p->rt.timeout++;
2036                         p->rt.watchdog_stamp = jiffies;
2037                 }
2038
2039                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2040                 if (p->rt.timeout > next)
2041                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2042         }
2043 }
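
/*
 * Illustrative sketch, not part of rt.c: the watchdog above enforces
 * RLIMIT_RTTIME, the per-task budget of CPU time (in microseconds) that an
 * RT task may consume without blocking.  A minimal user-space way to
 * exercise it could look like this; the limit values are arbitrary
 * examples, and hitting the soft limit is expected to deliver SIGXCPU.
 */
#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };
        struct rlimit rl = {
                .rlim_cur = 500000,     /* soft limit: 0.5 s of RT CPU time */
                .rlim_max = 1000000,    /* hard limit: 1 s */
        };

        if (setrlimit(RLIMIT_RTTIME, &rl))
                perror("setrlimit");
        if (sched_setscheduler(0, SCHED_FIFO, &sp))
                perror("sched_setscheduler");   /* needs CAP_SYS_NICE */

        for (;;)
                ;       /* busy loop: expect SIGXCPU once the soft limit is reached */
}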
2044
2045 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2046 {
2047         struct sched_rt_entity *rt_se = &p->rt;
2048
2049         update_curr_rt(rq);
2050
2051         watchdog(rq, p);
2052
2053         /*
2054          * RR tasks need a special form of timeslice management.
2055          * FIFO tasks have no timeslices.
2056          */
2057         if (p->policy != SCHED_RR)
2058                 return;
2059
2060         if (--p->rt.time_slice)
2061                 return;
2062
2063         p->rt.time_slice = sched_rr_timeslice;
2064
2065         /*
2066          * Requeue to the end of the queue if we (and all of our ancestors) are
2067          * not the only element on the queue.
2068          */
2069         for_each_sched_rt_entity(rt_se) {
2070                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2071                         requeue_task_rt(rq, p, 0);
2072                         set_tsk_need_resched(p);
2073                         return;
2074                 }
2075         }
2076 }
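
/*
 * Illustrative sketch, not part of rt.c: the round-robin requeue above is
 * driven by sched_rr_timeslice.  On kernels that expose the
 * sched_rr_timeslice_ms sysctl (assumed available here), the current value
 * can be read straight out of procfs.
 */
#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("RR timeslice (ms): %s", buf);   /* buf ends in '\n' */
        fclose(f);
        return 0;
}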
2077
2078 static void set_curr_task_rt(struct rq *rq)
2079 {
2080         struct task_struct *p = rq->curr;
2081
2082         p->se.exec_start = rq_clock_task(rq);
2083
2084         /* The running task is never eligible for pushing */
2085         dequeue_pushable_task(rq, p);
2086 }
2087
2088 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2089 {
2090         /*
2091          * Time slice is 0 for SCHED_FIFO tasks
2092          */
2093         if (task->policy == SCHED_RR)
2094                 return sched_rr_timeslice;
2095         else
2096                 return 0;
2097 }
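
/*
 * Illustrative sketch, not part of rt.c: get_rr_interval_rt() backs the
 * sched_rr_get_interval() system call, so the behaviour above (a real
 * timeslice for SCHED_RR, zero for SCHED_FIFO) is observable from user
 * space.  The priority value is an arbitrary example.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };
        struct timespec ts;

        if (sched_setscheduler(0, SCHED_RR, &sp))
                perror("sched_setscheduler");   /* needs CAP_SYS_NICE */

        if (sched_rr_get_interval(0, &ts) == 0)
                printf("RR timeslice: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);

        return 0;
}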
2098
2099 const struct sched_class rt_sched_class = {
2100         .next                   = &fair_sched_class,
2101         .enqueue_task           = enqueue_task_rt,
2102         .dequeue_task           = dequeue_task_rt,
2103         .yield_task             = yield_task_rt,
2104
2105         .check_preempt_curr     = check_preempt_curr_rt,
2106
2107         .pick_next_task         = pick_next_task_rt,
2108         .put_prev_task          = put_prev_task_rt,
2109
2110 #ifdef CONFIG_SMP
2111         .select_task_rq         = select_task_rq_rt,
2112
2113         .set_cpus_allowed       = set_cpus_allowed_rt,
2114         .rq_online              = rq_online_rt,
2115         .rq_offline             = rq_offline_rt,
2116         .post_schedule          = post_schedule_rt,
2117         .task_woken             = task_woken_rt,
2118         .switched_from          = switched_from_rt,
2119 #endif
2120
2121         .set_curr_task          = set_curr_task_rt,
2122         .task_tick              = task_tick_rt,
2123
2124         .get_rr_interval        = get_rr_interval_rt,
2125
2126         .prio_changed           = prio_changed_rt,
2127         .switched_to            = switched_to_rt,
2128 };
2129
2130 #ifdef CONFIG_SCHED_DEBUG
2131 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2132
2133 void print_rt_stats(struct seq_file *m, int cpu)
2134 {
2135         rt_rq_iter_t iter;
2136         struct rt_rq *rt_rq;
2137
2138         rcu_read_lock();
2139         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2140                 print_rt_rq(m, cpu, rt_rq);
2141         rcu_read_unlock();
2142 }
2143 #endif /* CONFIG_SCHED_DEBUG */