/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

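/*
 * Periodic replenishment timer: each time it fires, forward the hrtimer
 * past any missed periods and let do_sched_rt_period_timer() refill the
 * rt_rqs it serves.  The timer keeps rearming itself until every one of
 * those rt_rqs has gone idle.
 */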
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

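/*
 * Arm the replenishment timer the first time it is needed.  This is a
 * no-op when RT throttling is disabled (runtime == RUNTIME_INF) or the
 * timer is already pending.
 */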
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

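/*
 * An rt_se that owns a group runqueue (->my_q) represents a task group;
 * one without represents a plain task.
 */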
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

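/*
 * Wire one CPU's rt_rq and rt_se into the group hierarchy: the rt_rq
 * belongs to @tg and runs on @cpu, while the rt_se representing it is
 * queued either on the root rt_rq (no parent) or on the parent group's
 * runqueue.
 */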
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq, cpu_rq(i));
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

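/*
 * RT overload tracking: rd->rto_mask and rd->rto_count record which CPUs
 * in the root domain are overloaded with RT tasks, so that
 * pull_rt_task() knows where to look.
 */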
#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

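/*
 * A runqueue counts as overloaded once it has more than one runnable RT
 * task and at least one of them is allowed to migrate elsewhere.
 */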
static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (!rt_entity_is_task(rt_se))
                return;

        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (rt_se->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (!rt_entity_is_task(rt_se))
                return;

        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (rt_se->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

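/*
 * The pushable list is a priority-sorted plist; a plist node carries its
 * priority, so the task is deleted, its node re-initialised with the
 * current prio and re-inserted to keep the list sorted.
 */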
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);    \
                (iter = next_task_group(iter)) &&                       \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

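/*
 * Typical use, as in __disable_runtime() below:
 *
 *      rt_rq_iter_t iter;
 *      struct rt_rq *rt_rq;
 *
 *      for_each_rt_rq(rt_rq, iter, rq)
 *              ...;
 */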
static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_add_rcu(&rt_rq->leaf_rt_rq_list,
                        &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

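/*
 * A task entity counts as boosted when its effective prio differs from
 * its normal_prio (i.e. priority inheritance is in effect); a group
 * entity counts as boosted when any of its members are.
 */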
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to
                 * steal, or __disable_runtime() below sets a specific rq to
                 * inf to indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have; that's the amount of runtime
                 * we lent out and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void disable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void enable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

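/*
 * CPU hotplug notifier: before a CPU goes down, reclaim any runtime it
 * lent out; when it comes (back) up, reset its bandwidth to the
 * configured values.
 */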
int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                disable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                enable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        default:
                return NOTIFY_DONE;
        }
}

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (!sched_feat(RT_RUNTIME_SHARE))
                return more;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

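/*
 * Runs from the rt_period hrtimer: for every CPU in the period mask,
 * charge off up to @overrun periods' worth of runtime and unthrottle
 * (re-enqueue) rt_rqs that are back under their budget.  Returns 1 when
 * the timer is no longer needed - everything idle, or throttling
 * disabled with nothing currently throttled.
 */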
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * Force a clock update if the CPU was idle,
                                 * lest wakeup -> unthrottle time accumulate.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                printk_sched("sched: RT throttling activated\n");
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (curr->sched_class != &rt_sched_class)
                return;

        delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

        inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {

                WARN_ON(prio < prev_prio);

                /*
                 * This may have been our highest task, and therefore
                 * we may have some recomputation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }

        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;

        dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

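/*
 * (De)queue-time bookkeeping: keep rt_nr_running, the cached highest
 * priority, the migration counters and the group/boost statistics in
 * sync on every enqueue and dequeue.
 */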
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        int prio = rt_se_prio(rt_se);

        WARN_ON(!rt_prio(prio));
        rt_rq->rt_nr_running++;

        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
        inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;

        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
        dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        if (!rt_rq->rt_nr_running)
                list_add_leaf_rt_rq(rt_rq);

        if (head)
                list_add(&rt_se->run_list, queue);
        else
                list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
        if (!rt_rq->rt_nr_running)
                list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}

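/*
 * Enqueue walks the hierarchy from the entity up towards the root: the
 * whole stack is first dequeued (top-down, see above), then every level
 * is re-enqueued so the cached group priorities get recomputed.
 */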
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

        if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dequeue_pushable_task(rq, p);

        dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        cpu = task_cpu(p);

        if (p->rt.nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues. If the woken
         * task is a higher priority, then it will stay on this CPU
         * and the lower prio task should be moved to another CPU.
         * Even though this will probably make the lower prio task
         * lose its cache, we do not want to bounce a higher task
         * around just because it gave up its CPU, perhaps for a
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->rt.nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        if (rq->curr->rt.nr_cpus_allowed == 1)
                return;

        if (p->rt.nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appear to be other cpus that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu.  If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (!rt_rq->rt_nr_running)
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock_task;

        return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct task_struct *p = _pick_next_task_rt(rq);

        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
        /*
         * We detect this state here so that we can avoid taking the RQ
         * lock again later if there is no need to push
         */
        rq->post_schedule = has_pushable_tasks(rq);
#endif

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
        if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

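/*
 * A task is a push/pull candidate when it isn't currently running, is
 * allowed on the target cpu (or no cpu was given) and is able to
 * migrate at all.
 */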
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
            (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio <= idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p;

                        if (!rt_entity_is_task(rt_se))
                                continue;

                        p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
                return -1;

        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpumask_test_cpu(cpu, lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;

                        /*
                         * "this_cpu" is cheaper to preempt than a
                         * remote processor.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        best_cpu = cpumask_first_and(lowest_mask,
                                                     sched_domain_span(sd));
                        if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(lowest_mask);
        if (cpu < nr_cpu_ids)
                return cpu;
        return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, the task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
                                     !task->on_rq)) {

                                raw_spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable, use it. */
                if (lowest_rq->rt.highest_prio.curr > task->prio)
                        break;

                /* try again */
                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_tasks(rq))
                return NULL;

        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(p->rt.nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));

        return p;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        if (unlikely(task_running(rq, next_task)))
                return 0;
#endif

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in at a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has migrated.
                 *
                 * We need to make sure that the task is still on the same
                 * run-queue and is also still the next task eligible for
                 * pushing.
                 */
                task = pick_next_pushable_task(rq);
                if (task_cpu(next_task) == rq->cpu && task == next_task) {
                        /*
                         * The task hasn't migrated, and is still the next
                         * eligible task, but we failed to find a run-queue
                         * to push it to.  Do not retry in this case, since
                         * other cpus will pull from us when ready.
                         */
                        goto out;
                }

                if (!task)
                        /* No more tasks, just exit */
                        goto out;

                /*
                 * Something has shifted, try again.
                 */
                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
        ret = 1;

        resched_task(lowest_rq->curr);

        double_unlock_balance(rq, lowest_rq);

out:
        put_task_struct(next_task);

        return ret;
}

static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}

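/*
 * Scan the overloaded CPUs in our root domain and pull over any RT task
 * that would preempt what this runqueue is about to schedule.
 */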
1696 static int pull_rt_task(struct rq *this_rq)
1697 {
1698         int this_cpu = this_rq->cpu, ret = 0, cpu;
1699         struct task_struct *p;
1700         struct rq *src_rq;
1701
1702         if (likely(!rt_overloaded(this_rq)))
1703                 return 0;
1704
1705         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1706                 if (this_cpu == cpu)
1707                         continue;
1708
1709                 src_rq = cpu_rq(cpu);
1710
1711                 /*
1712                  * Don't bother taking the src_rq->lock if the next highest
1713                  * task is known to be lower-priority than our current task.
1714                  * This may look racy, but if this value is about to go
1715                  * logically higher, the src_rq will push this task away.
1716                  * And if it's going logically lower, we do not care.
1717                  */
1718                 if (src_rq->rt.highest_prio.next >=
1719                     this_rq->rt.highest_prio.curr)
1720                         continue;
1721
1722                 /*
1723                  * We can potentially drop this_rq's lock in
1724                  * double_lock_balance, and another CPU could
1725                  * alter this_rq
1726                  */
1727                 double_lock_balance(this_rq, src_rq);
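                /*
                 * double_lock_balance() may briefly drop this_rq->lock
                 * so that both locks are taken in a consistent order,
                 * avoiding an ABBA deadlock with a concurrent pull in
                 * the opposite direction.
                 */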
1728
1729                 /*
1730                  * Are there still pullable RT tasks?
1731                  */
1732                 if (src_rq->rt.rt_nr_running <= 1)
1733                         goto skip;
1734
1735                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1736
1737                 /*
1738                  * Do we have an RT task that preempts
1739                  * the to-be-scheduled task?
1740                  */
1741                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1742                         WARN_ON(p == src_rq->curr);
1743                         WARN_ON(!p->on_rq);
1744
1745                         /*
1746                          * There's a chance that p is higher in priority
1747                          * than what's currently running on its cpu.
1748                          * This is just that p is waking up and hasn't
1749                          * had a chance to schedule. We only pull
1750                          * p if it is lower in priority than the
1751                          * current task on the run queue.
1752                          */
1753                         if (p->prio < src_rq->curr->prio)
1754                                 goto skip;
1755
1756                         ret = 1;
1757
1758                         deactivate_task(src_rq, p, 0);
1759                         set_task_cpu(p, this_cpu);
1760                         activate_task(this_rq, p, 0);
1761                         /*
1762                          * We continue with the search, just in
1763                          * case there's an even higher prio task
1764                          * in another runqueue. (low likelihood
1765                          * but possible)
1766                          */
1767                 }
1768 skip:
1769                 double_unlock_balance(this_rq, src_rq);
1770         }
1771
1772         return ret;
1773 }
1774
1775 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1776 {
1777         /* Try to pull RT tasks here if we lower this rq's prio */
1778         if (rq->rt.highest_prio.curr > prev->prio)
1779                 pull_rt_task(rq);
1780 }
1781
1782 static void post_schedule_rt(struct rq *rq)
1783 {
1784         push_rt_tasks(rq);
1785 }
1786
1787 /*
1788  * If we are not running and we are not going to reschedule soon, we
1789  * should try to push tasks away now.
1790  */
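/*
 * Concretely, we push when: p is not running here, rq->curr has not
 * already been asked to reschedule, there is something pushable, p
 * can run on more than one CPU, and rq->curr is an RT task that is
 * either pinned to this CPU or of equal or higher priority than p
 * (a lower ->prio value means higher priority).
 */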
1791 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1792 {
1793         if (!task_running(rq, p) &&
1794             !test_tsk_need_resched(rq->curr) &&
1795             has_pushable_tasks(rq) &&
1796             p->rt.nr_cpus_allowed > 1 &&
1797             rt_task(rq->curr) &&
1798             (rq->curr->rt.nr_cpus_allowed < 2 ||
1799              rq->curr->prio <= p->prio))
1800                 push_rt_tasks(rq);
1801 }
1802
1803 static void set_cpus_allowed_rt(struct task_struct *p,
1804                                 const struct cpumask *new_mask)
1805 {
1806         struct rq *rq;
1807         int weight;
1808
1809         BUG_ON(!rt_task(p));
1810
1811         if (!p->on_rq)
1812                 return;
1813
1814         weight = cpumask_weight(new_mask);
1815
1816         /*
1817          * Only update the bookkeeping if the task's ability to migrate
1818          * actually changes (i.e. it crosses the single-CPU boundary).
1819          */
1820         if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
1821                 return;
1822
1823         rq = task_rq(p);
1824
1825         /*
1826          * Either the task could migrate before and no longer can, or vice versa.
1827          */
1828         if (weight <= 1) {
1829                 if (!task_current(rq, p))
1830                         dequeue_pushable_task(rq, p);
1831                 BUG_ON(!rq->rt.rt_nr_migratory);
1832                 rq->rt.rt_nr_migratory--;
1833         } else {
1834                 if (!task_current(rq, p))
1835                         enqueue_pushable_task(rq, p);
1836                 rq->rt.rt_nr_migratory++;
1837         }
1838
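        /* Re-evaluate the rq's RT-overload state now that the migratory count changed. */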
1839         update_rt_migration(&rq->rt);
1840 }
1841
1842 /* Assumes rq->lock is held */
1843 static void rq_online_rt(struct rq *rq)
1844 {
1845         if (rq->rt.overloaded)
1846                 rt_set_overload(rq);
1847
1848         __enable_runtime(rq);
1849
1850         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1851 }
1852
1853 /* Assumes rq->lock is held */
1854 static void rq_offline_rt(struct rq *rq)
1855 {
1856         if (rq->rt.overloaded)
1857                 rt_clear_overload(rq);
1858
1859         __disable_runtime(rq);
1860
1861         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1862 }
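
/*
 * Together, rq_online_rt() and rq_offline_rt() keep the root domain's
 * cpupri state in sync with which CPUs can accept pushed RT tasks: an
 * offline rq advertises CPUPRI_INVALID and so is never chosen as a
 * push target by find_lowest_rq().
 */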
1863
1864 /*
1865  * When switching away from the RT class, we put ourselves in a position
1866  * where we might want to pull RT tasks from other runqueues.
1867  */
1868 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1869 {
1870         /*
1871          * If there are other RT tasks then we will reschedule
1872          * and the scheduling of the other RT tasks will handle
1873          * the balancing. But if we are the last RT task
1874          * we may need to handle the pulling of RT tasks
1875          * now.
1876          */
1877         if (p->on_rq && !rq->rt.rt_nr_running)
1878                 pull_rt_task(rq);
1879 }
1880
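/*
 * Allocate the per-CPU scratch cpumasks that find_lowest_rq() uses
 * while searching for a push target.
 */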
1881 void init_sched_rt_class(void)
1882 {
1883         unsigned int i;
1884
1885         for_each_possible_cpu(i) {
1886                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1887                                         GFP_KERNEL, cpu_to_node(i));
1888         }
1889 }
1890 #endif /* CONFIG_SMP */
1891
1892 /*
1893  * When switching a task to RT, we may overload the runqueue
1894  * with RT tasks. In this case we try to push them off to
1895  * other runqueues.
1896  */
1897 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1898 {
1899         int check_resched = 1;
1900
1901         /*
1902          * If we are already running, then there's nothing
1903          * that needs to be done. But if we are not running
1904          * we may need to preempt the currently running task.
1905          * If that current running task is also an RT task
1906          * then see if we can move to another run queue.
1907          */
1908         if (p->on_rq && rq->curr != p) {
1909 #ifdef CONFIG_SMP
1910                 if (rq->rt.overloaded && push_rt_task(rq) &&
1911                     /* Don't resched if we changed runqueues */
1912                     rq != task_rq(p))
1913                         check_resched = 0;
1914 #endif /* CONFIG_SMP */
1915                 if (check_resched && p->prio < rq->curr->prio)
1916                         resched_task(rq->curr);
1917         }
1918 }
1919
1920 /*
1921  * Priority of the task has changed. This may cause
1922  * us to initiate a push or pull.
1923  */
1924 static void
1925 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1926 {
1927         if (!p->on_rq)
1928                 return;
1929
1930         if (rq->curr == p) {
1931 #ifdef CONFIG_SMP
1932                 /*
1933                  * If our priority decreases while running, we
1934                  * may need to pull tasks to this runqueue.
1935                  */
1936                 if (oldprio < p->prio)
1937                         pull_rt_task(rq);
1938                 /*
1939                  * If there's a higher priority task waiting to run
1940                  * then reschedule. Note, the above pull_rt_task
1941                  * can release the rq lock and p could migrate.
1942                  * Only reschedule if p is still on the same runqueue.
1943                  */
1944                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1945                         resched_task(p);
1946 #else
1947                 /* For UP simply resched on drop of prio */
1948                 if (oldprio < p->prio)
1949                         resched_task(p);
1950 #endif /* CONFIG_SMP */
1951         } else {
1952                 /*
1953          * This task is not running, but if it is of
1954          * higher priority than the currently running
1955          * task, then reschedule.
1956                  */
1957                 if (p->prio < rq->curr->prio)
1958                         resched_task(rq->curr);
1959         }
1960 }
1961
1962 static void watchdog(struct rq *rq, struct task_struct *p)
1963 {
1964         unsigned long soft, hard;
1965
1966         /* max may change after cur was read; this will be fixed on the next tick */
1967         soft = task_rlimit(p, RLIMIT_RTTIME);
1968         hard = task_rlimit_max(p, RLIMIT_RTTIME);
1969
1970         if (soft != RLIM_INFINITY) {
1971                 unsigned long next;
1972
1973                 p->rt.timeout++;
1974                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1975                 if (p->rt.timeout > next)
1976                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1977         }
1978 }
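
/*
 * The watchdog above implements RLIMIT_RTTIME: ->rt.timeout counts
 * scheduler ticks of RT runtime and, once it exceeds the limit
 * (converted from microseconds to ticks), arms sched_exp so that the
 * posix cpu-timer path raises SIGXCPU at the soft limit and SIGKILL
 * at the hard limit.
 */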
1979
1980 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1981 {
1982         update_curr_rt(rq);
1983
1984         watchdog(rq, p);
1985
1986         /*
1987          * RR tasks need a special form of timeslice management.
1988          * FIFO tasks have no timeslices.
1989          */
1990         if (p->policy != SCHED_RR)
1991                 return;
1992
1993         if (--p->rt.time_slice)
1994                 return;
1995
1996         p->rt.time_slice = RR_TIMESLICE;
1997
1998         /*
1999          * Requeue to the end of the queue if we are not the only
2000          * element queued at this priority:
2001          */
2002         if (p->rt.run_list.prev != p->rt.run_list.next) {
2003                 requeue_task_rt(rq, p, 0);
2004                 set_tsk_need_resched(p);
2005         }
2006 }
2007
2008 static void set_curr_task_rt(struct rq *rq)
2009 {
2010         struct task_struct *p = rq->curr;
2011
2012         p->se.exec_start = rq->clock_task;
2013
2014         /* The running task is never eligible for pushing */
2015         dequeue_pushable_task(rq, p);
2016 }
2017
2018 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2019 {
2020         /*
2021          * Time slice is 0 for SCHED_FIFO tasks
2022          */
2023         if (task->policy == SCHED_RR)
2024                 return RR_TIMESLICE;
2025         else
2026                 return 0;
2027 }
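
/*
 * This backs sched_rr_get_interval(2): user space sees the RR quantum
 * (in jiffies, converted to a timespec by the syscall) for SCHED_RR
 * tasks and 0 for SCHED_FIFO tasks.
 */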
2028
2029 const struct sched_class rt_sched_class = {
2030         .next                   = &fair_sched_class,
2031         .enqueue_task           = enqueue_task_rt,
2032         .dequeue_task           = dequeue_task_rt,
2033         .yield_task             = yield_task_rt,
2034
2035         .check_preempt_curr     = check_preempt_curr_rt,
2036
2037         .pick_next_task         = pick_next_task_rt,
2038         .put_prev_task          = put_prev_task_rt,
2039
2040 #ifdef CONFIG_SMP
2041         .select_task_rq         = select_task_rq_rt,
2042
2043         .set_cpus_allowed       = set_cpus_allowed_rt,
2044         .rq_online              = rq_online_rt,
2045         .rq_offline             = rq_offline_rt,
2046         .pre_schedule           = pre_schedule_rt,
2047         .post_schedule          = post_schedule_rt,
2048         .task_woken             = task_woken_rt,
2049         .switched_from          = switched_from_rt,
2050 #endif
2051
2052         .set_curr_task          = set_curr_task_rt,
2053         .task_tick              = task_tick_rt,
2054
2055         .get_rr_interval        = get_rr_interval_rt,
2056
2057         .prio_changed           = prio_changed_rt,
2058         .switched_to            = switched_to_rt,
2059 };
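
/*
 * The .next pointer chains this class to fair_sched_class: the core
 * scheduler walks the sched classes from highest to lowest priority
 * when picking the next task, with rt above fair and idle.
 */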
2060
2061 #ifdef CONFIG_SCHED_DEBUG
2062 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2063
2064 void print_rt_stats(struct seq_file *m, int cpu)
2065 {
2066         rt_rq_iter_t iter;
2067         struct rt_rq *rt_rq;
2068
2069         rcu_read_lock();
2070         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2071                 print_rt_rq(m, cpu, rt_rq);
2072         rcu_read_unlock();
2073 }
2074 #endif /* CONFIG_SCHED_DEBUG */