1 /*
2  *  CFQ, or complete fairness queueing, disk scheduler.
3  *
4  *  Based on ideas from a previously unfinished I/O scheduler
5  *  (round robin per-process disk scheduling) and from Andrea Arcangeli.
6  *
7  *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8  */
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/blkdev.h>
12 #include <linux/elevator.h>
13 #include <linux/jiffies.h>
14 #include <linux/rbtree.h>
15 #include <linux/ioprio.h>
16 #include <linux/blktrace_api.h>
17 #include <linux/blk-cgroup.h>
18 #include "blk.h"
19
20 /*
21  * tunables
22  */
23 /* max requests dispatched in one round of service */
24 static const int cfq_quantum = 8;
25 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26 /* maximum backwards seek, in KiB */
27 static const int cfq_back_max = 16 * 1024;
28 /* penalty of a backwards seek */
29 static const int cfq_back_penalty = 2;
30 static const int cfq_slice_sync = HZ / 10;
31 static int cfq_slice_async = HZ / 25;
32 static const int cfq_slice_async_rq = 2;
33 static int cfq_slice_idle = HZ / 125;
34 static int cfq_group_idle = HZ / 125;
35 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
36 static const int cfq_hist_divisor = 4;
37
38 /*
39  * offset from end of service tree
40  */
41 #define CFQ_IDLE_DELAY          (HZ / 5)
42
43 /*
44  * below this threshold, we consider thinktime immediate
45  */
46 #define CFQ_MIN_TT              (2)
47
48 #define CFQ_SLICE_SCALE         (5)
49 #define CFQ_HW_QUEUE_MIN        (5)
50 #define CFQ_SERVICE_SHIFT       12
51
52 #define CFQQ_SEEK_THR           (sector_t)(8 * 100)
53 #define CFQQ_CLOSE_THR          (sector_t)(8 * 1024)
54 #define CFQQ_SECT_THR_NONROT    (sector_t)(2 * 32)
55 #define CFQQ_SEEKY(cfqq)        (hweight32(cfqq->seek_history) > 32/8)
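/*
 * Editorial sketch (not from the original source): seek_history acts as a
 * 32-bit shift register over the last 32 requests, with a bit set by the
 * seek-time update path for each request that landed far from the previous
 * one.  hweight32() counts the set bits, so CFQQ_SEEKY() flags a queue once
 * more than 32/8 = 4 of its recent requests were distant seeks.
 */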
56
57 #define RQ_CIC(rq)              icq_to_cic((rq)->elv.icq)
58 #define RQ_CFQQ(rq)             (struct cfq_queue *) ((rq)->elv.priv[0])
59 #define RQ_CFQG(rq)             (struct cfq_group *) ((rq)->elv.priv[1])
60
61 static struct kmem_cache *cfq_pool;
62
63 #define CFQ_PRIO_LISTS          IOPRIO_BE_NR
64 #define cfq_class_idle(cfqq)    ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
65 #define cfq_class_rt(cfqq)      ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
67 #define sample_valid(samples)   ((samples) > 80)
68 #define rb_entry_cfqg(node)     rb_entry((node), struct cfq_group, rb_node)
69
70 /* blkio-related constants */
71 #define CFQ_WEIGHT_LEGACY_MIN   10
72 #define CFQ_WEIGHT_LEGACY_DFL   500
73 #define CFQ_WEIGHT_LEGACY_MAX   1000
74
75 struct cfq_ttime {
76         unsigned long last_end_request;
77
78         unsigned long ttime_total;
79         unsigned long ttime_samples;
80         unsigned long ttime_mean;
81 };
82
83 /*
84  * Most of our rbtree usage is for sorting with min extraction, so
85  * if we cache the leftmost node we don't have to walk down the tree
86  * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
87  * move this into the elevator for the rq sorting as well.
88  */
89 struct cfq_rb_root {
90         struct rb_root rb;
91         struct rb_node *left;
92         unsigned count;
93         u64 min_vdisktime;
94         struct cfq_ttime ttime;
95 };
96 #define CFQ_RB_ROOT     (struct cfq_rb_root) { .rb = RB_ROOT, \
97                         .ttime = {.last_end_request = jiffies,},}
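/*
 * Editorial sketch of how the cached leftmost pointer is consumed
 * (mirrors cfq_rb_first() further down): refresh the cache lazily and
 * return the minimum element without walking the whole tree.
 *
 *	if (!root->left)
 *		root->left = rb_first(&root->rb);
 *	if (root->left)
 *		return rb_entry(root->left, struct cfq_queue, rb_node);
 */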
98
99 /*
100  * Per process-grouping structure
101  */
102 struct cfq_queue {
103         /* reference count */
104         int ref;
105         /* various state flags, see below */
106         unsigned int flags;
107         /* parent cfq_data */
108         struct cfq_data *cfqd;
109         /* service_tree member */
110         struct rb_node rb_node;
111         /* service_tree key */
112         unsigned long rb_key;
113         /* prio tree member */
114         struct rb_node p_node;
115         /* prio tree root we belong to, if any */
116         struct rb_root *p_root;
117         /* sorted list of pending requests */
118         struct rb_root sort_list;
119         /* if fifo isn't expired, next request to serve */
120         struct request *next_rq;
121         /* requests queued in sort_list */
122         int queued[2];
123         /* currently allocated requests */
124         int allocated[2];
125         /* fifo list of requests in sort_list */
126         struct list_head fifo;
127
128         /* time when queue got scheduled in to dispatch first request. */
129         unsigned long dispatch_start;
130         unsigned int allocated_slice;
131         unsigned int slice_dispatch;
132         /* time when first request from queue completed and slice started. */
133         unsigned long slice_start;
134         unsigned long slice_end;
135         long slice_resid;
136
137         /* pending priority requests */
138         int prio_pending;
139         /* number of requests that are on the dispatch list or inside driver */
140         int dispatched;
141
142         /* io prio of this group */
143         unsigned short ioprio, org_ioprio;
144         unsigned short ioprio_class;
145
146         pid_t pid;
147
148         u32 seek_history;
149         sector_t last_request_pos;
150
151         struct cfq_rb_root *service_tree;
152         struct cfq_queue *new_cfqq;
153         struct cfq_group *cfqg;
154         /* Number of sectors dispatched from queue in single dispatch round */
155         unsigned long nr_sectors;
156 };
157
158 /*
159  * First index in the service_trees.
160  * IDLE is handled separately, using the dedicated service_tree_idle
161  */
162 enum wl_class_t {
163         BE_WORKLOAD = 0,
164         RT_WORKLOAD = 1,
165         IDLE_WORKLOAD = 2,
166         CFQ_PRIO_NR,
167 };
168
169 /*
170  * Second index in the service_trees.
171  */
172 enum wl_type_t {
173         ASYNC_WORKLOAD = 0,
174         SYNC_NOIDLE_WORKLOAD = 1,
175         SYNC_WORKLOAD = 2
176 };
177
178 struct cfqg_stats {
179 #ifdef CONFIG_CFQ_GROUP_IOSCHED
180         /* number of ios merged */
181         struct blkg_rwstat              merged;
182         /* total time spent on device in ns, may not be accurate w/ queueing */
183         struct blkg_rwstat              service_time;
184         /* total time spent waiting in scheduler queue in ns */
185         struct blkg_rwstat              wait_time;
186         /* number of IOs queued up */
187         struct blkg_rwstat              queued;
188         /* total disk time and nr sectors dispatched by this group */
189         struct blkg_stat                time;
190 #ifdef CONFIG_DEBUG_BLK_CGROUP
191         /* time not charged to this cgroup */
192         struct blkg_stat                unaccounted_time;
193         /* sum of number of ios queued across all samples */
194         struct blkg_stat                avg_queue_size_sum;
195         /* count of samples taken for average */
196         struct blkg_stat                avg_queue_size_samples;
197         /* how many times this group has been removed from service tree */
198         struct blkg_stat                dequeue;
199         /* total time spent waiting for it to be assigned a timeslice. */
200         struct blkg_stat                group_wait_time;
201         /* time spent idling for this blkcg_gq */
202         struct blkg_stat                idle_time;
203         /* total time with empty current active q with other requests queued */
204         struct blkg_stat                empty_time;
205         /* fields after this shouldn't be cleared on stat reset */
206         uint64_t                        start_group_wait_time;
207         uint64_t                        start_idle_time;
208         uint64_t                        start_empty_time;
209         uint16_t                        flags;
210 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
211 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
212 };
213
214 /* Per-cgroup data */
215 struct cfq_group_data {
216         /* must be the first member */
217         struct blkcg_policy_data cpd;
218
219         unsigned int weight;
220         unsigned int leaf_weight;
221 };
222
223 /* This is per cgroup per device grouping structure */
224 struct cfq_group {
225         /* must be the first member */
226         struct blkg_policy_data pd;
227
228         /* group service_tree member */
229         struct rb_node rb_node;
230
231         /* group service_tree key */
232         u64 vdisktime;
233
234         /*
235          * The number of active cfqgs and sum of their weights under this
236          * cfqg.  This covers this cfqg's leaf_weight and all children's
237          * weights, but does not cover weights of further descendants.
238          *
239          * If a cfqg is on the service tree, it's active.  An active cfqg
240          * also activates its parent and contributes to the children_weight
241          * of the parent.
242          */
243         int nr_active;
244         unsigned int children_weight;
245
246         /*
247          * vfraction is the fraction of vdisktime that the tasks in this
248          * cfqg are entitled to.  This is determined by compounding the
249          * ratios walking up from this cfqg to the root.
250          *
251          * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
252          * vfractions on a service tree is approximately 1.  The sum may
253          * deviate a bit due to rounding errors and fluctuations caused by
254          * cfqgs entering and leaving the service tree.
255          */
256         unsigned int vfraction;
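        /*
         * Worked example (editorial, illustrative numbers only): with
         * CFQ_SERVICE_SHIFT = 12, a vfraction of 1 << 12 means "entitled to
         * all of the disk time".  If this cfqg's leaf_weight is 500 against
         * an active children_weight of 1000 at its own level, and its weight
         * is again 500 against its parent's children_weight of 1000, the
         * compounded vfraction is (1 << 12) * 1/2 * 1/2 = 1024, i.e. a
         * quarter of the disk time.
         */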
257
258         /*
259          * There are two weights - (internal) weight is the weight of this
260          * cfqg against the sibling cfqgs.  leaf_weight is the weight of
261          * this cfqg against the child cfqgs.  For the root cfqg, both
262          * weights are kept in sync for backward compatibility.
263          */
264         unsigned int weight;
265         unsigned int new_weight;
266         unsigned int dev_weight;
267
268         unsigned int leaf_weight;
269         unsigned int new_leaf_weight;
270         unsigned int dev_leaf_weight;
271
272         /* number of cfqq currently on this group */
273         int nr_cfqq;
274
275         /*
276          * Per group busy queues average. Useful for workload slice calc. We
277          * create the array for each prio class, but at run time it is used
278          * only for the RT and BE classes; the slot for the IDLE class remains
279          * unused. This is primarily done to avoid confusion and a gcc warning.
280          */
281         unsigned int busy_queues_avg[CFQ_PRIO_NR];
282         /*
283          * rr lists of queues with requests. We maintain service trees for
284          * RT and BE classes. These trees are subdivided into subclasses
285          * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
286          * class there is no subclassification and all the cfq queues go on
287          * a single tree service_tree_idle.
288          * Counts are embedded in the cfq_rb_root
289          */
290         struct cfq_rb_root service_trees[2][3];
291         struct cfq_rb_root service_tree_idle;
292
293         unsigned long saved_wl_slice;
294         enum wl_type_t saved_wl_type;
295         enum wl_class_t saved_wl_class;
296
297         /* number of requests that are on the dispatch list or inside driver */
298         int dispatched;
299         struct cfq_ttime ttime;
300         struct cfqg_stats stats;        /* stats for this cfqg */
301
302         /* async queue for each priority case */
303         struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
304         struct cfq_queue *async_idle_cfqq;
305
306 };
307
308 struct cfq_io_cq {
309         struct io_cq            icq;            /* must be the first member */
310         struct cfq_queue        *cfqq[2];
311         struct cfq_ttime        ttime;
312         int                     ioprio;         /* the current ioprio */
313 #ifdef CONFIG_CFQ_GROUP_IOSCHED
314         uint64_t                blkcg_serial_nr; /* the current blkcg serial */
315 #endif
316 };
317
318 /*
319  * Per block device queue structure
320  */
321 struct cfq_data {
322         struct request_queue *queue;
323         /* Root service tree for cfq_groups */
324         struct cfq_rb_root grp_service_tree;
325         struct cfq_group *root_group;
326
327         /*
328          * The priority currently being served
329          */
330         enum wl_class_t serving_wl_class;
331         enum wl_type_t serving_wl_type;
332         unsigned long workload_expires;
333         struct cfq_group *serving_group;
334
335         /*
336          * Each priority tree is sorted by next_request position.  These
337          * trees are used when determining if two or more queues are
338          * interleaving requests (see cfq_close_cooperator).
339          */
340         struct rb_root prio_trees[CFQ_PRIO_LISTS];
341
342         unsigned int busy_queues;
343         unsigned int busy_sync_queues;
344
345         int rq_in_driver;
346         int rq_in_flight[2];
347
348         /*
349          * queue-depth detection
350          */
351         int rq_queued;
352         int hw_tag;
353         /*
354          * hw_tag can be
355          * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
356          *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
357          *  0 => no NCQ
358          */
359         int hw_tag_est_depth;
360         unsigned int hw_tag_samples;
361
362         /*
363          * idle window management
364          */
365         struct timer_list idle_slice_timer;
366         struct work_struct unplug_work;
367
368         struct cfq_queue *active_queue;
369         struct cfq_io_cq *active_cic;
370
371         sector_t last_position;
372
373         /*
374          * tunables, see top of file
375          */
376         unsigned int cfq_quantum;
377         unsigned int cfq_fifo_expire[2];
378         unsigned int cfq_back_penalty;
379         unsigned int cfq_back_max;
380         unsigned int cfq_slice[2];
381         unsigned int cfq_slice_async_rq;
382         unsigned int cfq_slice_idle;
383         unsigned int cfq_group_idle;
384         unsigned int cfq_latency;
385         unsigned int cfq_target_latency;
386
387         /*
388          * Fallback dummy cfqq for extreme OOM conditions
389          */
390         struct cfq_queue oom_cfqq;
391
392         unsigned long last_delayed_sync;
393 };
394
395 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
396 static void cfq_put_queue(struct cfq_queue *cfqq);
397
398 static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
399                                             enum wl_class_t class,
400                                             enum wl_type_t type)
401 {
402         if (!cfqg)
403                 return NULL;
404
405         if (class == IDLE_WORKLOAD)
406                 return &cfqg->service_tree_idle;
407
408         return &cfqg->service_trees[class][type];
409 }
410
411 enum cfqq_state_flags {
412         CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
413         CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
414         CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
415         CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
416         CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
417         CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
418         CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
419         CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
420         CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
421         CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
422         CFQ_CFQQ_FLAG_split_coop,       /* shared cfqq will be split */
423         CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
424         CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
425 };
426
427 #define CFQ_CFQQ_FNS(name)                                              \
428 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)         \
429 {                                                                       \
430         (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
431 }                                                                       \
432 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)        \
433 {                                                                       \
434         (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
435 }                                                                       \
436 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)         \
437 {                                                                       \
438         return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
439 }
440
441 CFQ_CFQQ_FNS(on_rr);
442 CFQ_CFQQ_FNS(wait_request);
443 CFQ_CFQQ_FNS(must_dispatch);
444 CFQ_CFQQ_FNS(must_alloc_slice);
445 CFQ_CFQQ_FNS(fifo_expire);
446 CFQ_CFQQ_FNS(idle_window);
447 CFQ_CFQQ_FNS(prio_changed);
448 CFQ_CFQQ_FNS(slice_new);
449 CFQ_CFQQ_FNS(sync);
450 CFQ_CFQQ_FNS(coop);
451 CFQ_CFQQ_FNS(split_coop);
452 CFQ_CFQQ_FNS(deep);
453 CFQ_CFQQ_FNS(wait_busy);
454 #undef CFQ_CFQQ_FNS
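/*
 * Editorial reference (derived mechanically from the macro above):
 * CFQ_CFQQ_FNS(on_rr) expands to
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */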
455
456 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
457
458 /* cfqg stats flags */
459 enum cfqg_stats_flags {
460         CFQG_stats_waiting = 0,
461         CFQG_stats_idling,
462         CFQG_stats_empty,
463 };
464
465 #define CFQG_FLAG_FNS(name)                                             \
466 static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)     \
467 {                                                                       \
468         stats->flags |= (1 << CFQG_stats_##name);                       \
469 }                                                                       \
470 static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)    \
471 {                                                                       \
472         stats->flags &= ~(1 << CFQG_stats_##name);                      \
473 }                                                                       \
474 static inline int cfqg_stats_##name(struct cfqg_stats *stats)           \
475 {                                                                       \
476         return (stats->flags & (1 << CFQG_stats_##name)) != 0;          \
477 }                                                                       \
478
479 CFQG_FLAG_FNS(waiting)
480 CFQG_FLAG_FNS(idling)
481 CFQG_FLAG_FNS(empty)
482 #undef CFQG_FLAG_FNS
483
484 /* This should be called with the queue_lock held. */
485 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
486 {
487         unsigned long long now;
488
489         if (!cfqg_stats_waiting(stats))
490                 return;
491
492         now = sched_clock();
493         if (time_after64(now, stats->start_group_wait_time))
494                 blkg_stat_add(&stats->group_wait_time,
495                               now - stats->start_group_wait_time);
496         cfqg_stats_clear_waiting(stats);
497 }
498
499 /* This should be called with the queue_lock held. */
500 static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
501                                                  struct cfq_group *curr_cfqg)
502 {
503         struct cfqg_stats *stats = &cfqg->stats;
504
505         if (cfqg_stats_waiting(stats))
506                 return;
507         if (cfqg == curr_cfqg)
508                 return;
509         stats->start_group_wait_time = sched_clock();
510         cfqg_stats_mark_waiting(stats);
511 }
512
513 /* This should be called with the queue_lock held. */
514 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
515 {
516         unsigned long long now;
517
518         if (!cfqg_stats_empty(stats))
519                 return;
520
521         now = sched_clock();
522         if (time_after64(now, stats->start_empty_time))
523                 blkg_stat_add(&stats->empty_time,
524                               now - stats->start_empty_time);
525         cfqg_stats_clear_empty(stats);
526 }
527
528 static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
529 {
530         blkg_stat_add(&cfqg->stats.dequeue, 1);
531 }
532
533 static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
534 {
535         struct cfqg_stats *stats = &cfqg->stats;
536
537         if (blkg_rwstat_total(&stats->queued))
538                 return;
539
540         /*
541          * The group is already marked empty. This can happen if a cfqq got a
542          * new request in the parent group and moved to this group while being
543          * added to the service tree. Just ignore the event and move on.
544          */
545         if (cfqg_stats_empty(stats))
546                 return;
547
548         stats->start_empty_time = sched_clock();
549         cfqg_stats_mark_empty(stats);
550 }
551
552 static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
553 {
554         struct cfqg_stats *stats = &cfqg->stats;
555
556         if (cfqg_stats_idling(stats)) {
557                 unsigned long long now = sched_clock();
558
559                 if (time_after64(now, stats->start_idle_time))
560                         blkg_stat_add(&stats->idle_time,
561                                       now - stats->start_idle_time);
562                 cfqg_stats_clear_idling(stats);
563         }
564 }
565
566 static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
567 {
568         struct cfqg_stats *stats = &cfqg->stats;
569
570         BUG_ON(cfqg_stats_idling(stats));
571
572         stats->start_idle_time = sched_clock();
573         cfqg_stats_mark_idling(stats);
574 }
575
576 static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
577 {
578         struct cfqg_stats *stats = &cfqg->stats;
579
580         blkg_stat_add(&stats->avg_queue_size_sum,
581                       blkg_rwstat_total(&stats->queued));
582         blkg_stat_add(&stats->avg_queue_size_samples, 1);
583         cfqg_stats_update_group_wait_time(stats);
584 }
585
586 #else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
587
588 static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
589 static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
590 static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
591 static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
592 static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
593 static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
594 static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
595
596 #endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
597
598 #ifdef CONFIG_CFQ_GROUP_IOSCHED
599
600 static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
601 {
602         return pd ? container_of(pd, struct cfq_group, pd) : NULL;
603 }
604
605 static struct cfq_group_data
606 *cpd_to_cfqgd(struct blkcg_policy_data *cpd)
607 {
608         return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
609 }
610
611 static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
612 {
613         return pd_to_blkg(&cfqg->pd);
614 }
615
616 static struct blkcg_policy blkcg_policy_cfq;
617
618 static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
619 {
620         return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
621 }
622
623 static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
624 {
625         return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
626 }
627
628 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
629 {
630         struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
631
632         return pblkg ? blkg_to_cfqg(pblkg) : NULL;
633 }
634
635 static inline void cfqg_get(struct cfq_group *cfqg)
636 {
637         return blkg_get(cfqg_to_blkg(cfqg));
638 }
639
640 static inline void cfqg_put(struct cfq_group *cfqg)
641 {
642         return blkg_put(cfqg_to_blkg(cfqg));
643 }
644
645 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  do {                    \
646         char __pbuf[128];                                               \
647                                                                         \
648         blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));  \
649         blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
650                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
651                         cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
652                           __pbuf, ##args);                              \
653 } while (0)
654
655 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)  do {                    \
656         char __pbuf[128];                                               \
657                                                                         \
658         blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));          \
659         blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);    \
660 } while (0)
661
662 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
663                                             struct cfq_group *curr_cfqg, int rw)
664 {
665         blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
666         cfqg_stats_end_empty_time(&cfqg->stats);
667         cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
668 }
669
670 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
671                         unsigned long time, unsigned long unaccounted_time)
672 {
673         blkg_stat_add(&cfqg->stats.time, time);
674 #ifdef CONFIG_DEBUG_BLK_CGROUP
675         blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
676 #endif
677 }
678
679 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
680 {
681         blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
682 }
683
684 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
685 {
686         blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
687 }
688
689 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
690                         uint64_t start_time, uint64_t io_start_time, int rw)
691 {
692         struct cfqg_stats *stats = &cfqg->stats;
693         unsigned long long now = sched_clock();
694
695         if (time_after64(now, io_start_time))
696                 blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
697         if (time_after64(io_start_time, start_time))
698                 blkg_rwstat_add(&stats->wait_time, rw,
699                                 io_start_time - start_time);
700 }
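/*
 * Editorial note on the accounting above: start_time is when the request
 * entered the scheduler, io_start_time is when it was issued to the device,
 * and sched_clock() at completion is "now"; wait_time therefore covers the
 * time spent in the scheduler queue and service_time the on-device portion,
 * matching the cfqg_stats field descriptions.
 */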
701
702 /* @stats = 0 */
703 static void cfqg_stats_reset(struct cfqg_stats *stats)
704 {
705         /* queued stats shouldn't be cleared */
706         blkg_rwstat_reset(&stats->merged);
707         blkg_rwstat_reset(&stats->service_time);
708         blkg_rwstat_reset(&stats->wait_time);
709         blkg_stat_reset(&stats->time);
710 #ifdef CONFIG_DEBUG_BLK_CGROUP
711         blkg_stat_reset(&stats->unaccounted_time);
712         blkg_stat_reset(&stats->avg_queue_size_sum);
713         blkg_stat_reset(&stats->avg_queue_size_samples);
714         blkg_stat_reset(&stats->dequeue);
715         blkg_stat_reset(&stats->group_wait_time);
716         blkg_stat_reset(&stats->idle_time);
717         blkg_stat_reset(&stats->empty_time);
718 #endif
719 }
720
721 /* @to += @from */
722 static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
723 {
724         /* queued stats shouldn't be cleared */
725         blkg_rwstat_add_aux(&to->merged, &from->merged);
726         blkg_rwstat_add_aux(&to->service_time, &from->service_time);
727         blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
728         blkg_stat_add_aux(&to->time, &from->time);
729 #ifdef CONFIG_DEBUG_BLK_CGROUP
730         blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
731         blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
732         blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
733         blkg_stat_add_aux(&to->dequeue, &from->dequeue);
734         blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
735         blkg_stat_add_aux(&to->idle_time, &from->idle_time);
736         blkg_stat_add_aux(&to->empty_time, &from->empty_time);
737 #endif
738 }
739
740 /*
741  * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
742  * recursive stats can still account for the amount used by this cfqg after
743  * it's gone.
744  */
745 static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
746 {
747         struct cfq_group *parent = cfqg_parent(cfqg);
748
749         lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
750
751         if (unlikely(!parent))
752                 return;
753
754         cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
755         cfqg_stats_reset(&cfqg->stats);
756 }
757
758 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
759
760 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
761 static inline void cfqg_get(struct cfq_group *cfqg) { }
762 static inline void cfqg_put(struct cfq_group *cfqg) { }
763
764 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)  \
765         blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
766                         cfq_cfqq_sync((cfqq)) ? 'S' : 'A',              \
767                         cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
768                                 ##args)
769 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)          do {} while (0)
770
771 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
772                         struct cfq_group *curr_cfqg, int rw) { }
773 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
774                         unsigned long time, unsigned long unaccounted_time) { }
775 static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
776 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
777 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
778                         uint64_t start_time, uint64_t io_start_time, int rw) { }
779
780 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
781
782 #define cfq_log(cfqd, fmt, args...)     \
783         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
784
785 /* Traverses through cfq group service trees */
786 #define for_each_cfqg_st(cfqg, i, j, st) \
787         for (i = 0; i <= IDLE_WORKLOAD; i++) \
788                 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
789                         : &cfqg->service_tree_idle; \
790                         (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
791                         (i == IDLE_WORKLOAD && j == 0); \
792                         j++, st = i < IDLE_WORKLOAD ? \
793                         &cfqg->service_trees[i][j]: NULL) \
794
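/*
 * Editorial note: the for_each_cfqg_st() loop above visits the six RT/BE
 * trees (service_trees[BE|RT][ASYNC|SYNC_NOIDLE|SYNC]) and then the single
 * service_tree_idle, i.e. all seven cfq_rb_roots of a group.
 */
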
795 static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
796         struct cfq_ttime *ttime, bool group_idle)
797 {
798         unsigned long slice;
799         if (!sample_valid(ttime->ttime_samples))
800                 return false;
801         if (group_idle)
802                 slice = cfqd->cfq_group_idle;
803         else
804                 slice = cfqd->cfq_slice_idle;
805         return ttime->ttime_mean > slice;
806 }
807
808 static inline bool iops_mode(struct cfq_data *cfqd)
809 {
810         /*
811          * If we are not idling on queues and it is an NCQ drive, requests
812          * execute in parallel and measuring time is not meaningful in most
813          * cases unless we drive shallower queue depths, which would itself
814          * become a performance bottleneck. In such cases, switch to providing
815          * fairness in terms of number of IOs.
816          */
817         if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
818                 return true;
819         else
820                 return false;
821 }
822
823 static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
824 {
825         if (cfq_class_idle(cfqq))
826                 return IDLE_WORKLOAD;
827         if (cfq_class_rt(cfqq))
828                 return RT_WORKLOAD;
829         return BE_WORKLOAD;
830 }
831
832
833 static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
834 {
835         if (!cfq_cfqq_sync(cfqq))
836                 return ASYNC_WORKLOAD;
837         if (!cfq_cfqq_idle_window(cfqq))
838                 return SYNC_NOIDLE_WORKLOAD;
839         return SYNC_WORKLOAD;
840 }
841
842 static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
843                                         struct cfq_data *cfqd,
844                                         struct cfq_group *cfqg)
845 {
846         if (wl_class == IDLE_WORKLOAD)
847                 return cfqg->service_tree_idle.count;
848
849         return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
850                 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
851                 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
852 }
853
854 static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
855                                         struct cfq_group *cfqg)
856 {
857         return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
858                 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
859 }
860
861 static void cfq_dispatch_insert(struct request_queue *, struct request *);
862 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
863                                        struct cfq_io_cq *cic, struct bio *bio);
864
865 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
866 {
867         /* cic->icq is the first member, %NULL will convert to %NULL */
868         return container_of(icq, struct cfq_io_cq, icq);
869 }
870
871 static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
872                                                struct io_context *ioc)
873 {
874         if (ioc)
875                 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
876         return NULL;
877 }
878
879 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
880 {
881         return cic->cfqq[is_sync];
882 }
883
884 static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
885                                 bool is_sync)
886 {
887         cic->cfqq[is_sync] = cfqq;
888 }
889
890 static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
891 {
892         return cic->icq.q->elevator->elevator_data;
893 }
894
895 /*
896  * We regard a request as SYNC if it is either a read or has the SYNC bit
897  * set (in which case it could also be a direct WRITE).
898  */
899 static inline bool cfq_bio_sync(struct bio *bio)
900 {
901         return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
902 }
903
904 /*
905  * Schedule a run of the queue if there are requests pending and nothing in
906  * the driver will restart queueing.
907  */
908 static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
909 {
910         if (cfqd->busy_queues) {
911                 cfq_log(cfqd, "schedule dispatch");
912                 kblockd_schedule_work(&cfqd->unplug_work);
913         }
914 }
915
916 /*
917  * Scale schedule slice based on io priority. Use the sync time slice only
918  * if a queue is marked sync and has sync io queued. A sync queue with async
919  * io only should not get the full sync slice length.
920  */
921 static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
922                                  unsigned short prio)
923 {
924         const int base_slice = cfqd->cfq_slice[sync];
925
926         WARN_ON(prio >= IOPRIO_BE_NR);
927
928         return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
929 }
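/*
 * Worked example (editorial; assumes HZ == 1000, so cfq_slice_sync is
 * 100ms): base_slice/CFQ_SLICE_SCALE is 20ms per priority step, so a sync
 * queue gets 180ms at ioprio 0, 100ms at the default ioprio 4 and 40ms at
 * ioprio 7.
 */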
930
931 static inline int
932 cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
933 {
934         return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
935 }
936
937 /**
938  * cfqg_scale_charge - scale disk time charge according to cfqg weight
939  * @charge: disk time being charged
940  * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
941  *
942  * Scale @charge according to @vfraction, which is in range (0, 1].  The
943  * scaling is inversely proportional.
944  *
945  * scaled = charge / vfraction
946  *
947  * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
948  */
949 static inline u64 cfqg_scale_charge(unsigned long charge,
950                                     unsigned int vfraction)
951 {
952         u64 c = charge << CFQ_SERVICE_SHIFT;    /* make it fixed point */
953
954         /* charge / vfraction */
955         c <<= CFQ_SERVICE_SHIFT;
956         do_div(c, vfraction);
957         return c;
958 }
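/*
 * Worked example (editorial): with CFQ_SERVICE_SHIFT == 12, a charge of 100
 * and a vfraction of 2048 (the group is entitled to half the device),
 * c = (100 << 24) / 2048 == 200 << 12, i.e. in the fixed point noted above
 * the group's vdisktime advances twice as fast as the wall-clock charge.
 */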
959
960 static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
961 {
962         s64 delta = (s64)(vdisktime - min_vdisktime);
963         if (delta > 0)
964                 min_vdisktime = vdisktime;
965
966         return min_vdisktime;
967 }
968
969 static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
970 {
971         s64 delta = (s64)(vdisktime - min_vdisktime);
972         if (delta < 0)
973                 min_vdisktime = vdisktime;
974
975         return min_vdisktime;
976 }
977
978 static void update_min_vdisktime(struct cfq_rb_root *st)
979 {
980         struct cfq_group *cfqg;
981
982         if (st->left) {
983                 cfqg = rb_entry_cfqg(st->left);
984                 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
985                                                   cfqg->vdisktime);
986         }
987 }
988
989 /*
990  * Get the averaged number of queues of RT/BE priority.
991  * The average is updated with a formula that gives more weight to higher
992  * numbers, so that it follows sudden increases quickly and decreases slowly.
993  */
994
995 static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
996                                         struct cfq_group *cfqg, bool rt)
997 {
998         unsigned min_q, max_q;
999         unsigned mult  = cfq_hist_divisor - 1;
1000         unsigned round = cfq_hist_divisor / 2;
1001         unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1002
1003         min_q = min(cfqg->busy_queues_avg[rt], busy);
1004         max_q = max(cfqg->busy_queues_avg[rt], busy);
1005         cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1006                 cfq_hist_divisor;
1007         return cfqg->busy_queues_avg[rt];
1008 }
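/*
 * Worked example (editorial): with cfq_hist_divisor == 4 (mult == 3,
 * round == 2), an old average of 2 and 6 busy queues gives
 * (3*6 + 2 + 2)/4 == 5, jumping up almost immediately; if busy then drops
 * to 0, the next update gives (3*5 + 0 + 2)/4 == 4, drifting back down one
 * step at a time.
 */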
1009
1010 static inline unsigned
1011 cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1012 {
1013         return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1014 }
1015
1016 static inline unsigned
1017 cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1018 {
1019         unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1020         if (cfqd->cfq_latency) {
1021                 /*
1022                  * interested queues (we consider only the ones with the same
1023                  * priority class in the cfq group)
1024                  */
1025                 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1026                                                 cfq_class_rt(cfqq));
1027                 unsigned sync_slice = cfqd->cfq_slice[1];
1028                 unsigned expect_latency = sync_slice * iq;
1029                 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1030
1031                 if (expect_latency > group_slice) {
1032                         unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1033                         /* scale low_slice according to IO priority
1034                          * and sync vs async */
1035                         unsigned low_slice =
1036                                 min(slice, base_low_slice * slice / sync_slice);
1037                         /* the adapted slice value is scaled to fit all iqs
1038                          * into the target latency */
1039                         slice = max(slice * group_slice / expect_latency,
1040                                     low_slice);
1041                 }
1042         }
1043         return slice;
1044 }
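/*
 * Worked example (editorial; assumes HZ == 1000 and a group owning the
 * whole device): the target latency is 300ms, the sync slice 100ms and
 * cfq_slice_idle 8ms.  With 8 busy queues, expect_latency is 800ms, which
 * exceeds the 300ms group_slice, so a default-priority sync queue's 100ms
 * slice is scaled to max(100 * 300/800, min(100, 16 * 100/100)) ==
 * max(37, 16) == 37ms.
 */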
1045
1046 static inline void
1047 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1048 {
1049         unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1050
1051         cfqq->slice_start = jiffies;
1052         cfqq->slice_end = jiffies + slice;
1053         cfqq->allocated_slice = slice;
1054         cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1055 }
1056
1057 /*
1058  * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1059  * isn't valid until the first request from the dispatch is activated
1060  * and the slice time set.
1061  */
1062 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1063 {
1064         if (cfq_cfqq_slice_new(cfqq))
1065                 return false;
1066         if (time_before(jiffies, cfqq->slice_end))
1067                 return false;
1068
1069         return true;
1070 }
1071
1072 /*
1073  * Lifted from AS - choose which of rq1 and rq2 is best served now.
1074  * We choose the request that is closest to the head right now. Distance
1075  * behind the head is penalized and only allowed to a certain extent.
1076  */
1077 static struct request *
1078 cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1079 {
1080         sector_t s1, s2, d1 = 0, d2 = 0;
1081         unsigned long back_max;
1082 #define CFQ_RQ1_WRAP    0x01 /* request 1 wraps */
1083 #define CFQ_RQ2_WRAP    0x02 /* request 2 wraps */
1084         unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1085
1086         if (rq1 == NULL || rq1 == rq2)
1087                 return rq2;
1088         if (rq2 == NULL)
1089                 return rq1;
1090
1091         if (rq_is_sync(rq1) != rq_is_sync(rq2))
1092                 return rq_is_sync(rq1) ? rq1 : rq2;
1093
1094         if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1095                 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1096
1097         s1 = blk_rq_pos(rq1);
1098         s2 = blk_rq_pos(rq2);
1099
1100         /*
1101          * by definition, 1KiB is 2 sectors
1102          */
1103         back_max = cfqd->cfq_back_max * 2;
1104
1105         /*
1106          * Strict one way elevator _except_ in the case where we allow
1107          * short backward seeks which are biased as twice the cost of a
1108          * similar forward seek.
1109          */
1110         if (s1 >= last)
1111                 d1 = s1 - last;
1112         else if (s1 + back_max >= last)
1113                 d1 = (last - s1) * cfqd->cfq_back_penalty;
1114         else
1115                 wrap |= CFQ_RQ1_WRAP;
1116
1117         if (s2 >= last)
1118                 d2 = s2 - last;
1119         else if (s2 + back_max >= last)
1120                 d2 = (last - s2) * cfqd->cfq_back_penalty;
1121         else
1122                 wrap |= CFQ_RQ2_WRAP;
1123
1124         /* Found required data */
1125
1126         /*
1127          * By doing switch() on the bit mask "wrap" we avoid having to
1128          * check two variables for all permutations: --> faster!
1129          */
1130         switch (wrap) {
1131         case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1132                 if (d1 < d2)
1133                         return rq1;
1134                 else if (d2 < d1)
1135                         return rq2;
1136                 else {
1137                         if (s1 >= s2)
1138                                 return rq1;
1139                         else
1140                                 return rq2;
1141                 }
1142
1143         case CFQ_RQ2_WRAP:
1144                 return rq1;
1145         case CFQ_RQ1_WRAP:
1146                 return rq2;
1147         case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1148         default:
1149                 /*
1150                  * Since both rqs are wrapped,
1151                  * start with the one that's further behind head
1152                  * (--> only *one* back seek required),
1153                  * since back seek takes more time than forward.
1154                  */
1155                 if (s1 <= s2)
1156                         return rq1;
1157                 else
1158                         return rq2;
1159         }
1160 }
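/*
 * Worked example (editorial): with the default cfq_back_max of 16384 KiB
 * and cfq_back_penalty of 2, if the head is at sector 1000 then a request
 * at sector 1100 has distance 100 while one at sector 960 is 40 sectors
 * behind and gets the penalized distance 80, so the backward request is
 * still preferred; anything further behind than back_max counts as wrapped
 * and loses to any non-wrapped request.
 */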
1161
1162 /*
1163  * Below is the leftmost-node caching rbtree addon
1164  */
1165 static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1166 {
1167         /* Service tree is empty */
1168         if (!root->count)
1169                 return NULL;
1170
1171         if (!root->left)
1172                 root->left = rb_first(&root->rb);
1173
1174         if (root->left)
1175                 return rb_entry(root->left, struct cfq_queue, rb_node);
1176
1177         return NULL;
1178 }
1179
1180 static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1181 {
1182         if (!root->left)
1183                 root->left = rb_first(&root->rb);
1184
1185         if (root->left)
1186                 return rb_entry_cfqg(root->left);
1187
1188         return NULL;
1189 }
1190
1191 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1192 {
1193         rb_erase(n, root);
1194         RB_CLEAR_NODE(n);
1195 }
1196
1197 static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1198 {
1199         if (root->left == n)
1200                 root->left = NULL;
1201         rb_erase_init(n, &root->rb);
1202         --root->count;
1203 }
1204
1205 /*
1206  * would be nice to take fifo expire time into account as well
1207  */
1208 static struct request *
1209 cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1210                   struct request *last)
1211 {
1212         struct rb_node *rbnext = rb_next(&last->rb_node);
1213         struct rb_node *rbprev = rb_prev(&last->rb_node);
1214         struct request *next = NULL, *prev = NULL;
1215
1216         BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1217
1218         if (rbprev)
1219                 prev = rb_entry_rq(rbprev);
1220
1221         if (rbnext)
1222                 next = rb_entry_rq(rbnext);
1223         else {
1224                 rbnext = rb_first(&cfqq->sort_list);
1225                 if (rbnext && rbnext != &last->rb_node)
1226                         next = rb_entry_rq(rbnext);
1227         }
1228
1229         return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1230 }
1231
1232 static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1233                                       struct cfq_queue *cfqq)
1234 {
1235         /*
1236          * just an approximation, should be ok.
1237          */
1238         return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1239                        cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1240 }
1241
1242 static inline s64
1243 cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1244 {
1245         return cfqg->vdisktime - st->min_vdisktime;
1246 }
1247
1248 static void
1249 __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1250 {
1251         struct rb_node **node = &st->rb.rb_node;
1252         struct rb_node *parent = NULL;
1253         struct cfq_group *__cfqg;
1254         s64 key = cfqg_key(st, cfqg);
1255         int left = 1;
1256
1257         while (*node != NULL) {
1258                 parent = *node;
1259                 __cfqg = rb_entry_cfqg(parent);
1260
1261                 if (key < cfqg_key(st, __cfqg))
1262                         node = &parent->rb_left;
1263                 else {
1264                         node = &parent->rb_right;
1265                         left = 0;
1266                 }
1267         }
1268
1269         if (left)
1270                 st->left = &cfqg->rb_node;
1271
1272         rb_link_node(&cfqg->rb_node, parent, node);
1273         rb_insert_color(&cfqg->rb_node, &st->rb);
1274 }
1275
1276 /*
1277  * This has to be called only on activation of cfqg
1278  */
1279 static void
1280 cfq_update_group_weight(struct cfq_group *cfqg)
1281 {
1282         if (cfqg->new_weight) {
1283                 cfqg->weight = cfqg->new_weight;
1284                 cfqg->new_weight = 0;
1285         }
1286 }
1287
1288 static void
1289 cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1290 {
1291         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1292
1293         if (cfqg->new_leaf_weight) {
1294                 cfqg->leaf_weight = cfqg->new_leaf_weight;
1295                 cfqg->new_leaf_weight = 0;
1296         }
1297 }
1298
1299 static void
1300 cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1301 {
1302         unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;      /* start with 1 */
1303         struct cfq_group *pos = cfqg;
1304         struct cfq_group *parent;
1305         bool propagate;
1306
1307         /* add to the service tree */
1308         BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1309
1310         /*
1311          * Update leaf_weight.  We cannot update weight at this point
1312          * because cfqg might already have been activated and is
1313          * contributing its current weight to the parent's children_weight.
1314          */
1315         cfq_update_group_leaf_weight(cfqg);
1316         __cfq_group_service_tree_add(st, cfqg);
1317
1318         /*
1319          * Activate @cfqg and calculate the portion of vfraction @cfqg is
1320          * entitled to.  vfraction is calculated by walking the tree
1321          * towards the root calculating the fraction it has at each level.
1322          * The compounded ratio is how much vfraction @cfqg owns.
1323          *
1324          * Start with the proportion tasks in this cfqg has against active
1325          * children cfqgs - its leaf_weight against children_weight.
1326          */
1327         propagate = !pos->nr_active++;
1328         pos->children_weight += pos->leaf_weight;
1329         vfr = vfr * pos->leaf_weight / pos->children_weight;
1330
1331         /*
1332          * Compound ->weight walking up the tree.  Both activation and
1333          * vfraction calculation are done in the same loop.  Propagation
1334          * stops once an already activated node is met.  vfraction
1335          * calculation should always continue to the root.
1336          */
1337         while ((parent = cfqg_parent(pos))) {
1338                 if (propagate) {
1339                         cfq_update_group_weight(pos);
1340                         propagate = !parent->nr_active++;
1341                         parent->children_weight += pos->weight;
1342                 }
1343                 vfr = vfr * pos->weight / parent->children_weight;
1344                 pos = parent;
1345         }
1346
1347         cfqg->vfraction = max_t(unsigned, vfr, 1);
1348 }
1349
1350 static void
1351 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1352 {
1353         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1354         struct cfq_group *__cfqg;
1355         struct rb_node *n;
1356
1357         cfqg->nr_cfqq++;
1358         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1359                 return;
1360
1361         /*
1362          * Currently put the group at the end. Later, implement something
1363          * so that groups get a lower vtime based on their weights, so that
1364          * a group does not lose everything if it was not continuously backlogged.
1365          */
1366         n = rb_last(&st->rb);
1367         if (n) {
1368                 __cfqg = rb_entry_cfqg(n);
1369                 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1370         } else
1371                 cfqg->vdisktime = st->min_vdisktime;
1372         cfq_group_service_tree_add(st, cfqg);
1373 }
1374
1375 static void
1376 cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1377 {
1378         struct cfq_group *pos = cfqg;
1379         bool propagate;
1380
1381         /*
1382          * Undo activation from cfq_group_service_tree_add().  Deactivate
1383          * @cfqg and propagate deactivation upwards.
1384          */
1385         propagate = !--pos->nr_active;
1386         pos->children_weight -= pos->leaf_weight;
1387
1388         while (propagate) {
1389                 struct cfq_group *parent = cfqg_parent(pos);
1390
1391                 /* @pos has 0 nr_active at this point */
1392                 WARN_ON_ONCE(pos->children_weight);
1393                 pos->vfraction = 0;
1394
1395                 if (!parent)
1396                         break;
1397
1398                 propagate = !--parent->nr_active;
1399                 parent->children_weight -= pos->weight;
1400                 pos = parent;
1401         }
1402
1403         /* remove from the service tree */
1404         if (!RB_EMPTY_NODE(&cfqg->rb_node))
1405                 cfq_rb_erase(&cfqg->rb_node, st);
1406 }
1407
1408 static void
1409 cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1410 {
1411         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1412
1413         BUG_ON(cfqg->nr_cfqq < 1);
1414         cfqg->nr_cfqq--;
1415
1416         /* If there are other cfq queues under this group, don't delete it */
1417         if (cfqg->nr_cfqq)
1418                 return;
1419
1420         cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1421         cfq_group_service_tree_del(st, cfqg);
1422         cfqg->saved_wl_slice = 0;
1423         cfqg_stats_update_dequeue(cfqg);
1424 }
1425
1426 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1427                                                 unsigned int *unaccounted_time)
1428 {
1429         unsigned int slice_used;
1430
1431         /*
1432          * Queue got expired before even a single request completed or
1433          * got expired immediately after first request completion.
1434          */
1435         if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1436                 /*
1437                  * Also charge the seek time incurred to the group, otherwise
1438                  * if there are multiple queues in the group, each can dispatch
1439                  * a single request on seeky media and cause lots of seek time
1440                  * that the group would otherwise never see.
1441                  */
1442                 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1443                                         1);
1444         } else {
1445                 slice_used = jiffies - cfqq->slice_start;
1446                 if (slice_used > cfqq->allocated_slice) {
1447                         *unaccounted_time = slice_used - cfqq->allocated_slice;
1448                         slice_used = cfqq->allocated_slice;
1449                 }
1450                 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1451                         *unaccounted_time += cfqq->slice_start -
1452                                         cfqq->dispatch_start;
1453         }
1454
1455         return slice_used;
1456 }
1457
1458 static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1459                                 struct cfq_queue *cfqq)
1460 {
1461         struct cfq_rb_root *st = &cfqd->grp_service_tree;
1462         unsigned int used_sl, charge, unaccounted_sl = 0;
1463         int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1464                         - cfqg->service_tree_idle.count;
1465         unsigned int vfr;
1466
1467         BUG_ON(nr_sync < 0);
1468         used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1469
1470         if (iops_mode(cfqd))
1471                 charge = cfqq->slice_dispatch;
1472         else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1473                 charge = cfqq->allocated_slice;
1474
1475         /*
1476          * Can't update vdisktime while on service tree and cfqg->vfraction
1477          * is valid only while on it.  Cache vfr, leave the service tree,
1478          * update vdisktime and go back on.  The re-addition to the tree
1479          * will also update the weights as necessary.
1480          */
1481         vfr = cfqg->vfraction;
1482         cfq_group_service_tree_del(st, cfqg);
1483         cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1484         cfq_group_service_tree_add(st, cfqg);
1485
1486         /* This group is being expired. Save the context */
1487         if (time_after(cfqd->workload_expires, jiffies)) {
1488                 cfqg->saved_wl_slice = cfqd->workload_expires
1489                                                 - jiffies;
1490                 cfqg->saved_wl_type = cfqd->serving_wl_type;
1491                 cfqg->saved_wl_class = cfqd->serving_wl_class;
1492         } else
1493                 cfqg->saved_wl_slice = 0;
1494
1495         cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1496                                         st->min_vdisktime);
1497         cfq_log_cfqq(cfqq->cfqd, cfqq,
1498                      "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1499                      used_sl, cfqq->slice_dispatch, charge,
1500                      iops_mode(cfqd), cfqq->nr_sectors);
1501         cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1502         cfqg_stats_set_start_empty_time(cfqg);
1503 }
1504
1505 /**
1506  * cfq_init_cfqg_base - initialize base part of a cfq_group
1507  * @cfqg: cfq_group to initialize
1508  *
1509  * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1510  * is enabled or not.
1511  */
1512 static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1513 {
1514         struct cfq_rb_root *st;
1515         int i, j;
1516
1517         for_each_cfqg_st(cfqg, i, j, st)
1518                 *st = CFQ_RB_ROOT;
1519         RB_CLEAR_NODE(&cfqg->rb_node);
1520
1521         cfqg->ttime.last_end_request = jiffies;
1522 }
1523
1524 #ifdef CONFIG_CFQ_GROUP_IOSCHED
1525 static void cfqg_stats_exit(struct cfqg_stats *stats)
1526 {
1527         blkg_rwstat_exit(&stats->merged);
1528         blkg_rwstat_exit(&stats->service_time);
1529         blkg_rwstat_exit(&stats->wait_time);
1530         blkg_rwstat_exit(&stats->queued);
1531         blkg_stat_exit(&stats->time);
1532 #ifdef CONFIG_DEBUG_BLK_CGROUP
1533         blkg_stat_exit(&stats->unaccounted_time);
1534         blkg_stat_exit(&stats->avg_queue_size_sum);
1535         blkg_stat_exit(&stats->avg_queue_size_samples);
1536         blkg_stat_exit(&stats->dequeue);
1537         blkg_stat_exit(&stats->group_wait_time);
1538         blkg_stat_exit(&stats->idle_time);
1539         blkg_stat_exit(&stats->empty_time);
1540 #endif
1541 }
1542
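/* Allocate the per-cpu counters backing @stats, unwinding on partial failure. */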
1543 static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1544 {
1545         if (blkg_rwstat_init(&stats->merged, gfp) ||
1546             blkg_rwstat_init(&stats->service_time, gfp) ||
1547             blkg_rwstat_init(&stats->wait_time, gfp) ||
1548             blkg_rwstat_init(&stats->queued, gfp) ||
1549             blkg_stat_init(&stats->time, gfp))
1550                 goto err;
1551
1552 #ifdef CONFIG_DEBUG_BLK_CGROUP
1553         if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1554             blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1555             blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1556             blkg_stat_init(&stats->dequeue, gfp) ||
1557             blkg_stat_init(&stats->group_wait_time, gfp) ||
1558             blkg_stat_init(&stats->idle_time, gfp) ||
1559             blkg_stat_init(&stats->empty_time, gfp))
1560                 goto err;
1561 #endif
1562         return 0;
1563 err:
1564         cfqg_stats_exit(stats);
1565         return -ENOMEM;
1566 }
1567
1568 static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1569 {
1570         struct cfq_group_data *cgd;
1571
1572         cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
1573         if (!cgd)
1574                 return NULL;
1575         return &cgd->cpd;
1576 }
1577
1578 static void cfq_cpd_init(struct blkcg_policy_data *cpd)
1579 {
1580         struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
1581
1582         if (cpd_to_blkcg(cpd) == &blkcg_root) {
1583                 cgd->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
1584                 cgd->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
1585         } else {
1586                 cgd->weight = CFQ_WEIGHT_LEGACY_DFL;
1587                 cgd->leaf_weight = CFQ_WEIGHT_LEGACY_DFL;
1588         }
1589 }
1590
1591 static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1592 {
1593         kfree(cpd_to_cfqgd(cpd));
1594 }
1595
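/* Allocate and initialize the cfq_group serving as per-blkg policy data. */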
1596 static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1597 {
1598         struct cfq_group *cfqg;
1599
1600         cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1601         if (!cfqg)
1602                 return NULL;
1603
1604         cfq_init_cfqg_base(cfqg);
1605         if (cfqg_stats_init(&cfqg->stats, gfp)) {
1606                 kfree(cfqg);
1607                 return NULL;
1608         }
1609
1610         return &cfqg->pd;
1611 }
1612
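/* A blkg came online: inherit the blkcg-wide default weights. */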
1613 static void cfq_pd_init(struct blkg_policy_data *pd)
1614 {
1615         struct cfq_group *cfqg = pd_to_cfqg(pd);
1616         struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
1617
1618         cfqg->weight = cgd->weight;
1619         cfqg->leaf_weight = cgd->leaf_weight;
1620 }
1621
1622 static void cfq_pd_offline(struct blkg_policy_data *pd)
1623 {
1624         struct cfq_group *cfqg = pd_to_cfqg(pd);
1625         int i;
1626
1627         for (i = 0; i < IOPRIO_BE_NR; i++) {
1628                 if (cfqg->async_cfqq[0][i])
1629                         cfq_put_queue(cfqg->async_cfqq[0][i]);
1630                 if (cfqg->async_cfqq[1][i])
1631                         cfq_put_queue(cfqg->async_cfqq[1][i]);
1632         }
1633
1634         if (cfqg->async_idle_cfqq)
1635                 cfq_put_queue(cfqg->async_idle_cfqq);
1636
1637         /*
1638          * @blkg is going offline and will be ignored by
1639          * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1640          * that they don't get lost.  If IOs complete after this point, the
1641          * stats for them will be lost.  Oh well...
1642          */
1643         cfqg_stats_xfer_dead(cfqg);
1644 }
1645
1646 static void cfq_pd_free(struct blkg_policy_data *pd)
1647 {
1648         struct cfq_group *cfqg = pd_to_cfqg(pd);
1649
1650         cfqg_stats_exit(&cfqg->stats);
1651         kfree(cfqg);
1652 }
1653
1654 static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1655 {
1656         struct cfq_group *cfqg = pd_to_cfqg(pd);
1657
1658         cfqg_stats_reset(&cfqg->stats);
1659 }
1660
1661 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1662                                          struct blkcg *blkcg)
1663 {
1664         struct blkcg_gq *blkg;
1665
1666         blkg = blkg_lookup(blkcg, cfqd->queue);
1667         if (likely(blkg))
1668                 return blkg_to_cfqg(blkg);
1669         return NULL;
1670 }
1671
1672 static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1673 {
1674         cfqq->cfqg = cfqg;
1675         /* cfqq reference on cfqg */
1676         cfqg_get(cfqg);
1677 }
1678
1679 static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1680                                      struct blkg_policy_data *pd, int off)
1681 {
1682         struct cfq_group *cfqg = pd_to_cfqg(pd);
1683
1684         if (!cfqg->dev_weight)
1685                 return 0;
1686         return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1687 }
1688
1689 static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1690 {
1691         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1692                           cfqg_prfill_weight_device, &blkcg_policy_cfq,
1693                           0, false);
1694         return 0;
1695 }
1696
1697 static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1698                                           struct blkg_policy_data *pd, int off)
1699 {
1700         struct cfq_group *cfqg = pd_to_cfqg(pd);
1701
1702         if (!cfqg->dev_leaf_weight)
1703                 return 0;
1704         return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1705 }
1706
1707 static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1708 {
1709         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1710                           cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1711                           0, false);
1712         return 0;
1713 }
1714
1715 static int cfq_print_weight(struct seq_file *sf, void *v)
1716 {
1717         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1718         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1719         unsigned int val = 0;
1720
1721         if (cgd)
1722                 val = cgd->weight;
1723
1724         seq_printf(sf, "%u\n", val);
1725         return 0;
1726 }
1727
1728 static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1729 {
1730         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1731         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1732         unsigned int val = 0;
1733
1734         if (cgd)
1735                 val = cgd->leaf_weight;
1736
1737         seq_printf(sf, "%u\n", val);
1738         return 0;
1739 }
1740
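/*
 * Parse and apply a per-device weight.  blkg_conf_prep() consumes the
 * "MAJ:MIN" prefix; the remainder must be a weight within the legacy range,
 * or "default" (0 is also accepted on the legacy files) to drop the
 * per-device override and fall back to the blkcg-wide weight.
 */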
1741 static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1742                                         char *buf, size_t nbytes, loff_t off,
1743                                         bool on_dfl, bool is_leaf_weight)
1744 {
1745         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1746         struct blkg_conf_ctx ctx;
1747         struct cfq_group *cfqg;
1748         struct cfq_group_data *cfqgd;
1749         int ret;
1750         u64 v;
1751
1752         ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1753         if (ret)
1754                 return ret;
1755
1756         if (sscanf(ctx.body, "%llu", &v) == 1) {
1757                 /* require "default" on dfl */
1758                 ret = -ERANGE;
1759                 if (!v && on_dfl)
1760                         goto out_finish;
1761         } else if (!strcmp(strim(ctx.body), "default")) {
1762                 v = 0;
1763         } else {
1764                 ret = -EINVAL;
1765                 goto out_finish;
1766         }
1767
1768         cfqg = blkg_to_cfqg(ctx.blkg);
1769         cfqgd = blkcg_to_cfqgd(blkcg);
1770
1771         ret = -ERANGE;
1772         if (!v || (v >= CFQ_WEIGHT_LEGACY_MIN && v <= CFQ_WEIGHT_LEGACY_MAX)) {
1773                 if (!is_leaf_weight) {
1774                         cfqg->dev_weight = v;
1775                         cfqg->new_weight = v ?: cfqgd->weight;
1776                 } else {
1777                         cfqg->dev_leaf_weight = v;
1778                         cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
1779                 }
1780                 ret = 0;
1781         }
1782 out_finish:
1783         blkg_conf_finish(&ctx);
1784         return ret ?: nbytes;
1785 }
1786
1787 static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1788                                       char *buf, size_t nbytes, loff_t off)
1789 {
1790         return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
1791 }
1792
1793 static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1794                                            char *buf, size_t nbytes, loff_t off)
1795 {
1796         return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
1797 }
1798
1799 static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1800                             bool is_leaf_weight)
1801 {
1802         struct blkcg *blkcg = css_to_blkcg(css);
1803         struct blkcg_gq *blkg;
1804         struct cfq_group_data *cfqgd;
1805         int ret = 0;
1806
1807         if (val < CFQ_WEIGHT_LEGACY_MIN || val > CFQ_WEIGHT_LEGACY_MAX)
1808                 return -EINVAL;
1809
1810         spin_lock_irq(&blkcg->lock);
1811         cfqgd = blkcg_to_cfqgd(blkcg);
1812         if (!cfqgd) {
1813                 ret = -EINVAL;
1814                 goto out;
1815         }
1816
1817         if (!is_leaf_weight)
1818                 cfqgd->weight = val;
1819         else
1820                 cfqgd->leaf_weight = val;
1821
1822         hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1823                 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1824
1825                 if (!cfqg)
1826                         continue;
1827
1828                 if (!is_leaf_weight) {
1829                         if (!cfqg->dev_weight)
1830                                 cfqg->new_weight = cfqgd->weight;
1831                 } else {
1832                         if (!cfqg->dev_leaf_weight)
1833                                 cfqg->new_leaf_weight = cfqgd->leaf_weight;
1834                 }
1835         }
1836
1837 out:
1838         spin_unlock_irq(&blkcg->lock);
1839         return ret;
1840 }
1841
1842 static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1843                           u64 val)
1844 {
1845         return __cfq_set_weight(css, val, false);
1846 }
1847
1848 static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1849                                struct cftype *cft, u64 val)
1850 {
1851         return __cfq_set_weight(css, val, true);
1852 }
1853
1854 static int cfqg_print_stat(struct seq_file *sf, void *v)
1855 {
1856         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1857                           &blkcg_policy_cfq, seq_cft(sf)->private, false);
1858         return 0;
1859 }
1860
1861 static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1862 {
1863         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1864                           &blkcg_policy_cfq, seq_cft(sf)->private, true);
1865         return 0;
1866 }
1867
1868 static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1869                                       struct blkg_policy_data *pd, int off)
1870 {
1871         u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1872                                           &blkcg_policy_cfq, off);
1873         return __blkg_prfill_u64(sf, pd, sum);
1874 }
1875
1876 static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1877                                         struct blkg_policy_data *pd, int off)
1878 {
1879         struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1880                                                         &blkcg_policy_cfq, off);
1881         return __blkg_prfill_rwstat(sf, pd, &sum);
1882 }
1883
1884 static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1885 {
1886         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1887                           cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1888                           seq_cft(sf)->private, false);
1889         return 0;
1890 }
1891
1892 static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1893 {
1894         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1895                           cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1896                           seq_cft(sf)->private, true);
1897         return 0;
1898 }
1899
1900 static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1901                                int off)
1902 {
1903         u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1904
1905         return __blkg_prfill_u64(sf, pd, sum >> 9);
1906 }
1907
1908 static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1909 {
1910         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1911                           cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1912         return 0;
1913 }
1914
1915 static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1916                                          struct blkg_policy_data *pd, int off)
1917 {
1918         struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1919                                         offsetof(struct blkcg_gq, stat_bytes));
1920         u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1921                 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1922
1923         return __blkg_prfill_u64(sf, pd, sum >> 9);
1924 }
1925
1926 static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1927 {
1928         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1929                           cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1930                           false);
1931         return 0;
1932 }
1933
1934 #ifdef CONFIG_DEBUG_BLK_CGROUP
1935 static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1936                                       struct blkg_policy_data *pd, int off)
1937 {
1938         struct cfq_group *cfqg = pd_to_cfqg(pd);
1939         u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1940         u64 v = 0;
1941
1942         if (samples) {
1943                 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1944                 v = div64_u64(v, samples);
1945         }
1946         __blkg_prfill_u64(sf, pd, v);
1947         return 0;
1948 }
1949
1950 /* print avg_queue_size */
1951 static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1952 {
1953         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1954                           cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1955                           0, false);
1956         return 0;
1957 }
1958 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
1959
1960 static struct cftype cfq_blkcg_legacy_files[] = {
1961         /* on root, weight is mapped to leaf_weight */
1962         {
1963                 .name = "weight_device",
1964                 .flags = CFTYPE_ONLY_ON_ROOT,
1965                 .seq_show = cfqg_print_leaf_weight_device,
1966                 .write = cfqg_set_leaf_weight_device,
1967         },
1968         {
1969                 .name = "weight",
1970                 .flags = CFTYPE_ONLY_ON_ROOT,
1971                 .seq_show = cfq_print_leaf_weight,
1972                 .write_u64 = cfq_set_leaf_weight,
1973         },
1974
1975         /* no such mapping necessary for !roots */
1976         {
1977                 .name = "weight_device",
1978                 .flags = CFTYPE_NOT_ON_ROOT,
1979                 .seq_show = cfqg_print_weight_device,
1980                 .write = cfqg_set_weight_device,
1981         },
1982         {
1983                 .name = "weight",
1984                 .flags = CFTYPE_NOT_ON_ROOT,
1985                 .seq_show = cfq_print_weight,
1986                 .write_u64 = cfq_set_weight,
1987         },
1988
1989         {
1990                 .name = "leaf_weight_device",
1991                 .seq_show = cfqg_print_leaf_weight_device,
1992                 .write = cfqg_set_leaf_weight_device,
1993         },
1994         {
1995                 .name = "leaf_weight",
1996                 .seq_show = cfq_print_leaf_weight,
1997                 .write_u64 = cfq_set_leaf_weight,
1998         },
1999
2000         /* statistics, covers only the tasks in the cfqg */
2001         {
2002                 .name = "time",
2003                 .private = offsetof(struct cfq_group, stats.time),
2004                 .seq_show = cfqg_print_stat,
2005         },
2006         {
2007                 .name = "sectors",
2008                 .seq_show = cfqg_print_stat_sectors,
2009         },
2010         {
2011                 .name = "io_service_bytes",
2012                 .private = (unsigned long)&blkcg_policy_cfq,
2013                 .seq_show = blkg_print_stat_bytes,
2014         },
2015         {
2016                 .name = "io_serviced",
2017                 .private = (unsigned long)&blkcg_policy_cfq,
2018                 .seq_show = blkg_print_stat_ios,
2019         },
2020         {
2021                 .name = "io_service_time",
2022                 .private = offsetof(struct cfq_group, stats.service_time),
2023                 .seq_show = cfqg_print_rwstat,
2024         },
2025         {
2026                 .name = "io_wait_time",
2027                 .private = offsetof(struct cfq_group, stats.wait_time),
2028                 .seq_show = cfqg_print_rwstat,
2029         },
2030         {
2031                 .name = "io_merged",
2032                 .private = offsetof(struct cfq_group, stats.merged),
2033                 .seq_show = cfqg_print_rwstat,
2034         },
2035         {
2036                 .name = "io_queued",
2037                 .private = offsetof(struct cfq_group, stats.queued),
2038                 .seq_show = cfqg_print_rwstat,
2039         },
2040
2041         /* the same statistics, but covering the cfqg and its descendants */
2042         {
2043                 .name = "time_recursive",
2044                 .private = offsetof(struct cfq_group, stats.time),
2045                 .seq_show = cfqg_print_stat_recursive,
2046         },
2047         {
2048                 .name = "sectors_recursive",
2049                 .seq_show = cfqg_print_stat_sectors_recursive,
2050         },
2051         {
2052                 .name = "io_service_bytes_recursive",
2053                 .private = (unsigned long)&blkcg_policy_cfq,
2054                 .seq_show = blkg_print_stat_bytes_recursive,
2055         },
2056         {
2057                 .name = "io_serviced_recursive",
2058                 .private = (unsigned long)&blkcg_policy_cfq,
2059                 .seq_show = blkg_print_stat_ios_recursive,
2060         },
2061         {
2062                 .name = "io_service_time_recursive",
2063                 .private = offsetof(struct cfq_group, stats.service_time),
2064                 .seq_show = cfqg_print_rwstat_recursive,
2065         },
2066         {
2067                 .name = "io_wait_time_recursive",
2068                 .private = offsetof(struct cfq_group, stats.wait_time),
2069                 .seq_show = cfqg_print_rwstat_recursive,
2070         },
2071         {
2072                 .name = "io_merged_recursive",
2073                 .private = offsetof(struct cfq_group, stats.merged),
2074                 .seq_show = cfqg_print_rwstat_recursive,
2075         },
2076         {
2077                 .name = "io_queued_recursive",
2078                 .private = offsetof(struct cfq_group, stats.queued),
2079                 .seq_show = cfqg_print_rwstat_recursive,
2080         },
2081 #ifdef CONFIG_DEBUG_BLK_CGROUP
2082         {
2083                 .name = "avg_queue_size",
2084                 .seq_show = cfqg_print_avg_queue_size,
2085         },
2086         {
2087                 .name = "group_wait_time",
2088                 .private = offsetof(struct cfq_group, stats.group_wait_time),
2089                 .seq_show = cfqg_print_stat,
2090         },
2091         {
2092                 .name = "idle_time",
2093                 .private = offsetof(struct cfq_group, stats.idle_time),
2094                 .seq_show = cfqg_print_stat,
2095         },
2096         {
2097                 .name = "empty_time",
2098                 .private = offsetof(struct cfq_group, stats.empty_time),
2099                 .seq_show = cfqg_print_stat,
2100         },
2101         {
2102                 .name = "dequeue",
2103                 .private = offsetof(struct cfq_group, stats.dequeue),
2104                 .seq_show = cfqg_print_stat,
2105         },
2106         {
2107                 .name = "unaccounted_time",
2108                 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2109                 .seq_show = cfqg_print_stat,
2110         },
2111 #endif  /* CONFIG_DEBUG_BLK_CGROUP */
2112         { }     /* terminate */
2113 };
2114
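/* "weight" knob for the default (cgroup v2) hierarchy */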
2115 static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2116 {
2117         struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2118         struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2119
2120         seq_printf(sf, "default %u\n", cgd->weight);
2121         blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2122                           &blkcg_policy_cfq, 0, false);
2123         return 0;
2124 }
2125
2126 static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2127                                      char *buf, size_t nbytes, loff_t off)
2128 {
2129         char *endp;
2130         int ret;
2131         u64 v;
2132
2133         buf = strim(buf);
2134
2135         /* "WEIGHT" or "default WEIGHT" sets the default weight */
2136         v = simple_strtoull(buf, &endp, 0);
2137         if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
2138                 ret = __cfq_set_weight(of_css(of), v, false);
2139                 return ret ?: nbytes;
2140         }
2141
2142         /* "MAJ:MIN WEIGHT" */
2143         return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2144 }
2145
2146 static struct cftype cfq_blkcg_files[] = {
2147         {
2148                 .name = "weight",
2149                 .flags = CFTYPE_NOT_ON_ROOT,
2150                 .seq_show = cfq_print_weight_on_dfl,
2151                 .write = cfq_set_weight_on_dfl,
2152         },
2153         { }     /* terminate */
2154 };
2155
2156 #else /* GROUP_IOSCHED */
2157 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2158                                          struct blkcg *blkcg)
2159 {
2160         return cfqd->root_group;
2161 }
2162
2163 static inline void
2164 cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2165         cfqq->cfqg = cfqg;
2166 }
2167
2168 #endif /* GROUP_IOSCHED */
2169
2170 /*
2171  * The cfqd->service_trees holds all pending cfq_queue's that have
2172  * requests waiting to be processed. It is sorted in the order that
2173  * we will service the queues.
2174  */
2175 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2176                                  bool add_front)
2177 {
2178         struct rb_node **p, *parent;
2179         struct cfq_queue *__cfqq;
2180         unsigned long rb_key;
2181         struct cfq_rb_root *st;
2182         int left;
2183         int new_cfqq = 1;
2184
2185         st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2186         if (cfq_class_idle(cfqq)) {
2187                 rb_key = CFQ_IDLE_DELAY;
2188                 parent = rb_last(&st->rb);
2189                 if (parent && parent != &cfqq->rb_node) {
2190                         __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2191                         rb_key += __cfqq->rb_key;
2192                 } else
2193                         rb_key += jiffies;
2194         } else if (!add_front) {
2195                 /*
2196                  * Get our rb key offset. Subtract any residual slice
2197                  * value carried from last service. A negative resid
2198                  * count indicates slice overrun, and this should position
2199                  * the next service time further away in the tree.
2200                  */
2201                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2202                 rb_key -= cfqq->slice_resid;
2203                 cfqq->slice_resid = 0;
2204         } else {
2205                 rb_key = -HZ;
2206                 __cfqq = cfq_rb_first(st);
2207                 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2208         }
2209
2210         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2211                 new_cfqq = 0;
2212                 /*
2213                  * same position, nothing more to do
2214                  */
2215                 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2216                         return;
2217
2218                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2219                 cfqq->service_tree = NULL;
2220         }
2221
2222         left = 1;
2223         parent = NULL;
2224         cfqq->service_tree = st;
2225         p = &st->rb.rb_node;
2226         while (*p) {
2227                 parent = *p;
2228                 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2229
2230                 /*
2231                  * sort by key, which represents service time.
2232                  */
2233                 if (time_before(rb_key, __cfqq->rb_key))
2234                         p = &parent->rb_left;
2235                 else {
2236                         p = &parent->rb_right;
2237                         left = 0;
2238                 }
2239         }
2240
2241         if (left)
2242                 st->left = &cfqq->rb_node;
2243
2244         cfqq->rb_key = rb_key;
2245         rb_link_node(&cfqq->rb_node, parent, p);
2246         rb_insert_color(&cfqq->rb_node, &st->rb);
2247         st->count++;
2248         if (add_front || !new_cfqq)
2249                 return;
2250         cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2251 }
2252
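/*
 * Look up the queue in the sector-sorted prio tree whose next request starts
 * at @sector.  If there is no exact match, return NULL and report the
 * would-be parent and link so the caller can insert there.
 */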
2253 static struct cfq_queue *
2254 cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2255                      sector_t sector, struct rb_node **ret_parent,
2256                      struct rb_node ***rb_link)
2257 {
2258         struct rb_node **p, *parent;
2259         struct cfq_queue *cfqq = NULL;
2260
2261         parent = NULL;
2262         p = &root->rb_node;
2263         while (*p) {
2264                 struct rb_node **n;
2265
2266                 parent = *p;
2267                 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2268
2269                 /*
2270                  * Sort strictly based on sector.  Smallest to the left,
2271                  * largest to the right.
2272                  */
2273                 if (sector > blk_rq_pos(cfqq->next_rq))
2274                         n = &(*p)->rb_right;
2275                 else if (sector < blk_rq_pos(cfqq->next_rq))
2276                         n = &(*p)->rb_left;
2277                 else
2278                         break;
2279                 p = n;
2280                 cfqq = NULL;
2281         }
2282
2283         *ret_parent = parent;
2284         if (rb_link)
2285                 *rb_link = p;
2286         return cfqq;
2287 }
2288
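/* (Re)position @cfqq in the prio tree, keyed by its next request's sector. */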
2289 static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2290 {
2291         struct rb_node **p, *parent;
2292         struct cfq_queue *__cfqq;
2293
2294         if (cfqq->p_root) {
2295                 rb_erase(&cfqq->p_node, cfqq->p_root);
2296                 cfqq->p_root = NULL;
2297         }
2298
2299         if (cfq_class_idle(cfqq))
2300                 return;
2301         if (!cfqq->next_rq)
2302                 return;
2303
2304         cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2305         __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2306                                       blk_rq_pos(cfqq->next_rq), &parent, &p);
2307         if (!__cfqq) {
2308                 rb_link_node(&cfqq->p_node, parent, p);
2309                 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2310         } else
2311                 cfqq->p_root = NULL;
2312 }
2313
2314 /*
2315  * Update cfqq's position in the service tree.
2316  */
2317 static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2318 {
2319         /*
2320          * Resorting requires the cfqq to be on the RR list already.
2321          */
2322         if (cfq_cfqq_on_rr(cfqq)) {
2323                 cfq_service_tree_add(cfqd, cfqq, 0);
2324                 cfq_prio_tree_add(cfqd, cfqq);
2325         }
2326 }
2327
2328 /*
2329  * add to busy list of queues for service, trying to be fair in ordering
2330  * the pending list according to last request service
2331  */
2332 static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2333 {
2334         cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2335         BUG_ON(cfq_cfqq_on_rr(cfqq));
2336         cfq_mark_cfqq_on_rr(cfqq);
2337         cfqd->busy_queues++;
2338         if (cfq_cfqq_sync(cfqq))
2339                 cfqd->busy_sync_queues++;
2340
2341         cfq_resort_rr_list(cfqd, cfqq);
2342 }
2343
2344 /*
2345  * Called when the cfqq no longer has requests pending, remove it from
2346  * the service tree.
2347  */
2348 static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2349 {
2350         cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2351         BUG_ON(!cfq_cfqq_on_rr(cfqq));
2352         cfq_clear_cfqq_on_rr(cfqq);
2353
2354         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2355                 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2356                 cfqq->service_tree = NULL;
2357         }
2358         if (cfqq->p_root) {
2359                 rb_erase(&cfqq->p_node, cfqq->p_root);
2360                 cfqq->p_root = NULL;
2361         }
2362
2363         cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2364         BUG_ON(!cfqd->busy_queues);
2365         cfqd->busy_queues--;
2366         if (cfq_cfqq_sync(cfqq))
2367                 cfqd->busy_sync_queues--;
2368 }
2369
2370 /*
2371  * rb tree support functions
2372  */
2373 static void cfq_del_rq_rb(struct request *rq)
2374 {
2375         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2376         const int sync = rq_is_sync(rq);
2377
2378         BUG_ON(!cfqq->queued[sync]);
2379         cfqq->queued[sync]--;
2380
2381         elv_rb_del(&cfqq->sort_list, rq);
2382
2383         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2384                 /*
2385                  * Queue will be deleted from service tree when we actually
2386                  * expire it later. Right now just remove it from prio tree
2387                  * as it is empty.
2388                  */
2389                 if (cfqq->p_root) {
2390                         rb_erase(&cfqq->p_node, cfqq->p_root);
2391                         cfqq->p_root = NULL;
2392                 }
2393         }
2394 }
2395
2396 static void cfq_add_rq_rb(struct request *rq)
2397 {
2398         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2399         struct cfq_data *cfqd = cfqq->cfqd;
2400         struct request *prev;
2401
2402         cfqq->queued[rq_is_sync(rq)]++;
2403
2404         elv_rb_add(&cfqq->sort_list, rq);
2405
2406         if (!cfq_cfqq_on_rr(cfqq))
2407                 cfq_add_cfqq_rr(cfqd, cfqq);
2408
2409         /*
2410          * check if this request is a better next-serve candidate
2411          */
2412         prev = cfqq->next_rq;
2413         cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2414
2415         /*
2416          * adjust priority tree position, if ->next_rq changes
2417          */
2418         if (prev != cfqq->next_rq)
2419                 cfq_prio_tree_add(cfqd, cfqq);
2420
2421         BUG_ON(!cfqq->next_rq);
2422 }
2423
2424 static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2425 {
2426         elv_rb_del(&cfqq->sort_list, rq);
2427         cfqq->queued[rq_is_sync(rq)]--;
2428         cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2429         cfq_add_rq_rb(rq);
2430         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2431                                  rq->cmd_flags);
2432 }
2433
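/*
 * Find a queued request that starts right where @bio ends, i.e. a front
 * merge candidate, by searching the sort list of the current task's cfqq.
 */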
2434 static struct request *
2435 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2436 {
2437         struct task_struct *tsk = current;
2438         struct cfq_io_cq *cic;
2439         struct cfq_queue *cfqq;
2440
2441         cic = cfq_cic_lookup(cfqd, tsk->io_context);
2442         if (!cic)
2443                 return NULL;
2444
2445         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2446         if (cfqq)
2447                 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2448
2449         return NULL;
2450 }
2451
2452 static void cfq_activate_request(struct request_queue *q, struct request *rq)
2453 {
2454         struct cfq_data *cfqd = q->elevator->elevator_data;
2455
2456         cfqd->rq_in_driver++;
2457         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2458                                                 cfqd->rq_in_driver);
2459
2460         cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2461 }
2462
2463 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2464 {
2465         struct cfq_data *cfqd = q->elevator->elevator_data;
2466
2467         WARN_ON(!cfqd->rq_in_driver);
2468         cfqd->rq_in_driver--;
2469         cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2470                                                 cfqd->rq_in_driver);
2471 }
2472
2473 static void cfq_remove_request(struct request *rq)
2474 {
2475         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2476
2477         if (cfqq->next_rq == rq)
2478                 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2479
2480         list_del_init(&rq->queuelist);
2481         cfq_del_rq_rb(rq);
2482
2483         cfqq->cfqd->rq_queued--;
2484         cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2485         if (rq->cmd_flags & REQ_PRIO) {
2486                 WARN_ON(!cfqq->prio_pending);
2487                 cfqq->prio_pending--;
2488         }
2489 }
2490
2491 static int cfq_merge(struct request_queue *q, struct request **req,
2492                      struct bio *bio)
2493 {
2494         struct cfq_data *cfqd = q->elevator->elevator_data;
2495         struct request *__rq;
2496
2497         __rq = cfq_find_rq_fmerge(cfqd, bio);
2498         if (__rq && elv_rq_merge_ok(__rq, bio)) {
2499                 *req = __rq;
2500                 return ELEVATOR_FRONT_MERGE;
2501         }
2502
2503         return ELEVATOR_NO_MERGE;
2504 }
2505
2506 static void cfq_merged_request(struct request_queue *q, struct request *req,
2507                                int type)
2508 {
2509         if (type == ELEVATOR_FRONT_MERGE) {
2510                 struct cfq_queue *cfqq = RQ_CFQQ(req);
2511
2512                 cfq_reposition_rq_rb(cfqq, req);
2513         }
2514 }
2515
2516 static void cfq_bio_merged(struct request_queue *q, struct request *req,
2517                                 struct bio *bio)
2518 {
2519         cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2520 }
2521
2522 static void
2523 cfq_merged_requests(struct request_queue *q, struct request *rq,
2524                     struct request *next)
2525 {
2526         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2527         struct cfq_data *cfqd = q->elevator->elevator_data;
2528
2529         /*
2530          * reposition in fifo if next is older than rq
2531          */
2532         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2533             time_before(next->fifo_time, rq->fifo_time) &&
2534             cfqq == RQ_CFQQ(next)) {
2535                 list_move(&rq->queuelist, &next->queuelist);
2536                 rq->fifo_time = next->fifo_time;
2537         }
2538
2539         if (cfqq->next_rq == next)
2540                 cfqq->next_rq = rq;
2541         cfq_remove_request(next);
2542         cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2543
2544         cfqq = RQ_CFQQ(next);
2545         /*
2546          * all requests of this queue have been merged into other queues; delete
2547          * it from the service tree. If it's the active_queue,
2548          * cfq_dispatch_requests() will choose whether to expire it or idle
2549          */
2550         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2551             cfqq != cfqd->active_queue)
2552                 cfq_del_cfqq_rr(cfqd, cfqq);
2553 }
2554
2555 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2556                            struct bio *bio)
2557 {
2558         struct cfq_data *cfqd = q->elevator->elevator_data;
2559         struct cfq_io_cq *cic;
2560         struct cfq_queue *cfqq;
2561
2562         /*
2563          * Disallow merge of a sync bio into an async request.
2564          */
2565         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2566                 return false;
2567
2568         /*
2569          * Lookup the cfqq that this bio will be queued with and allow
2570          * merge only if rq is queued there.
2571          */
2572         cic = cfq_cic_lookup(cfqd, current->io_context);
2573         if (!cic)
2574                 return false;
2575
2576         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2577         return cfqq == RQ_CFQQ(rq);
2578 }
2579
2580 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2581 {
2582         del_timer(&cfqd->idle_slice_timer);
2583         cfqg_stats_update_idle_time(cfqq->cfqg);
2584 }
2585
2586 static void __cfq_set_active_queue(struct cfq_data *cfqd,
2587                                    struct cfq_queue *cfqq)
2588 {
2589         if (cfqq) {
2590                 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2591                                 cfqd->serving_wl_class, cfqd->serving_wl_type);
2592                 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2593                 cfqq->slice_start = 0;
2594                 cfqq->dispatch_start = jiffies;
2595                 cfqq->allocated_slice = 0;
2596                 cfqq->slice_end = 0;
2597                 cfqq->slice_dispatch = 0;
2598                 cfqq->nr_sectors = 0;
2599
2600                 cfq_clear_cfqq_wait_request(cfqq);
2601                 cfq_clear_cfqq_must_dispatch(cfqq);
2602                 cfq_clear_cfqq_must_alloc_slice(cfqq);
2603                 cfq_clear_cfqq_fifo_expire(cfqq);
2604                 cfq_mark_cfqq_slice_new(cfqq);
2605
2606                 cfq_del_timer(cfqd, cfqq);
2607         }
2608
2609         cfqd->active_queue = cfqq;
2610 }
2611
2612 /*
2613  * current cfqq expired its slice (or was too idle), select new one
2614  */
2615 static void
2616 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2617                     bool timed_out)
2618 {
2619         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2620
2621         if (cfq_cfqq_wait_request(cfqq))
2622                 cfq_del_timer(cfqd, cfqq);
2623
2624         cfq_clear_cfqq_wait_request(cfqq);
2625         cfq_clear_cfqq_wait_busy(cfqq);
2626
2627         /*
2628          * If this cfqq is shared between multiple processes, check to
2629          * make sure that those processes are still issuing I/Os within
2630          * the mean seek distance.  If not, it may be time to break the
2631          * queues apart again.
2632          */
2633         if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2634                 cfq_mark_cfqq_split_coop(cfqq);
2635
2636         /*
2637          * store what was left of this slice, if the queue idled/timed out
2638          */
2639         if (timed_out) {
2640                 if (cfq_cfqq_slice_new(cfqq))
2641                         cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2642                 else
2643                         cfqq->slice_resid = cfqq->slice_end - jiffies;
2644                 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2645         }
2646
2647         cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2648
2649         if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2650                 cfq_del_cfqq_rr(cfqd, cfqq);
2651
2652         cfq_resort_rr_list(cfqd, cfqq);
2653
2654         if (cfqq == cfqd->active_queue)
2655                 cfqd->active_queue = NULL;
2656
2657         if (cfqd->active_cic) {
2658                 put_io_context(cfqd->active_cic->icq.ioc);
2659                 cfqd->active_cic = NULL;
2660         }
2661 }
2662
2663 static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2664 {
2665         struct cfq_queue *cfqq = cfqd->active_queue;
2666
2667         if (cfqq)
2668                 __cfq_slice_expired(cfqd, cfqq, timed_out);
2669 }
2670
2671 /*
2672  * Get next queue for service. Unless we have a queue preemption,
2673  * we'll simply select the first cfqq in the service tree.
2674  */
2675 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2676 {
2677         struct cfq_rb_root *st = st_for(cfqd->serving_group,
2678                         cfqd->serving_wl_class, cfqd->serving_wl_type);
2679
2680         if (!cfqd->rq_queued)
2681                 return NULL;
2682
2683         /* There is nothing to dispatch */
2684         if (!st)
2685                 return NULL;
2686         if (RB_EMPTY_ROOT(&st->rb))
2687                 return NULL;
2688         return cfq_rb_first(st);
2689 }
2690
2691 static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2692 {
2693         struct cfq_group *cfqg;
2694         struct cfq_queue *cfqq;
2695         int i, j;
2696         struct cfq_rb_root *st;
2697
2698         if (!cfqd->rq_queued)
2699                 return NULL;
2700
2701         cfqg = cfq_get_next_cfqg(cfqd);
2702         if (!cfqg)
2703                 return NULL;
2704
2705         for_each_cfqg_st(cfqg, i, j, st)
2706                 if ((cfqq = cfq_rb_first(st)) != NULL)
2707                         return cfqq;
2708         return NULL;
2709 }
2710
2711 /*
2712  * Get and set a new active queue for service.
2713  */
2714 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2715                                               struct cfq_queue *cfqq)
2716 {
2717         if (!cfqq)
2718                 cfqq = cfq_get_next_queue(cfqd);
2719
2720         __cfq_set_active_queue(cfqd, cfqq);
2721         return cfqq;
2722 }
2723
2724 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2725                                           struct request *rq)
2726 {
2727         if (blk_rq_pos(rq) >= cfqd->last_position)
2728                 return blk_rq_pos(rq) - cfqd->last_position;
2729         else
2730                 return cfqd->last_position - blk_rq_pos(rq);
2731 }
2732
2733 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2734                                struct request *rq)
2735 {
2736         return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2737 }
2738
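/*
 * Search the prio tree for a queue whose next request lies within
 * CFQQ_CLOSE_THR of cfqd->last_position, i.e. close enough to the current
 * head position to be worth servicing alongside @cur_cfqq.
 */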
2739 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2740                                     struct cfq_queue *cur_cfqq)
2741 {
2742         struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2743         struct rb_node *parent, *node;
2744         struct cfq_queue *__cfqq;
2745         sector_t sector = cfqd->last_position;
2746
2747         if (RB_EMPTY_ROOT(root))
2748                 return NULL;
2749
2750         /*
2751          * First, if we find a request starting at the end of the last
2752          * request, choose it.
2753          */
2754         __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2755         if (__cfqq)
2756                 return __cfqq;
2757
2758         /*
2759          * If the exact sector wasn't found, the parent of the NULL leaf
2760          * will contain the closest sector.
2761          */
2762         __cfqq = rb_entry(parent, struct cfq_queue, p_node);
2763         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2764                 return __cfqq;
2765
2766         if (blk_rq_pos(__cfqq->next_rq) < sector)
2767                 node = rb_next(&__cfqq->p_node);
2768         else
2769                 node = rb_prev(&__cfqq->p_node);
2770         if (!node)
2771                 return NULL;
2772
2773         __cfqq = rb_entry(node, struct cfq_queue, p_node);
2774         if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2775                 return __cfqq;
2776
2777         return NULL;
2778 }
2779
2780 /*
2781  * cfqd - obvious
2782  * cur_cfqq - passed in so that we don't decide that the current queue is
2783  *            closely cooperating with itself.
2784  *
2785  * So, basically we're assuming that cur_cfqq has dispatched at least
2786  * one request, and that cfqd->last_position reflects a position on the disk
2787  * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2788  * assumption.
2789  */
2790 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2791                                               struct cfq_queue *cur_cfqq)
2792 {
2793         struct cfq_queue *cfqq;
2794
2795         if (cfq_class_idle(cur_cfqq))
2796                 return NULL;
2797         if (!cfq_cfqq_sync(cur_cfqq))
2798                 return NULL;
2799         if (CFQQ_SEEKY(cur_cfqq))
2800                 return NULL;
2801
2802         /*
2803          * Don't search priority tree if it's the only queue in the group.
2804          */
2805         if (cur_cfqq->cfqg->nr_cfqq == 1)
2806                 return NULL;
2807
2808         /*
2809          * We should notice if some of the queues are cooperating, e.g.
2810          * working closely on the same area of the disk. In that case,
2811          * we can group them together and not waste time idling.
2812          */
2813         cfqq = cfqq_close(cfqd, cur_cfqq);
2814         if (!cfqq)
2815                 return NULL;
2816
2817         /* If new queue belongs to different cfq_group, don't choose it */
2818         if (cur_cfqq->cfqg != cfqq->cfqg)
2819                 return NULL;
2820
2821         /*
2822          * It only makes sense to merge sync queues.
2823          */
2824         if (!cfq_cfqq_sync(cfqq))
2825                 return NULL;
2826         if (CFQQ_SEEKY(cfqq))
2827                 return NULL;
2828
2829         /*
2830          * Do not merge queues of different priority classes
2831          */
2832         if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2833                 return NULL;
2834
2835         return cfqq;
2836 }
2837
2838 /*
2839  * Determine whether we should enforce idle window for this queue.
2840  */
2841
2842 static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2843 {
2844         enum wl_class_t wl_class = cfqq_class(cfqq);
2845         struct cfq_rb_root *st = cfqq->service_tree;
2846
2847         BUG_ON(!st);
2848         BUG_ON(!st->count);
2849
2850         if (!cfqd->cfq_slice_idle)
2851                 return false;
2852
2853         /* We never do for idle class queues. */
2854         if (wl_class == IDLE_WORKLOAD)
2855                 return false;
2856
2857         /* We do for queues that were marked with idle window flag. */
2858         if (cfq_cfqq_idle_window(cfqq) &&
2859            !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2860                 return true;
2861
2862         /*
2863          * Otherwise, we only do so if the queue is the last one
2864          * in its service tree.
2865          */
2866         if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2867            !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2868                 return true;
2869         cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2870         return false;
2871 }
2872
2873 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2874 {
2875         struct cfq_queue *cfqq = cfqd->active_queue;
2876         struct cfq_io_cq *cic;
2877         unsigned long sl, group_idle = 0;
2878
2879         /*
2880          * SSD device without seek penalty, disable idling. But only do so
2881          * for devices that support queuing, otherwise we still have a problem
2882          * with sync vs async workloads.
2883          */
2884         if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2885                 return;
2886
2887         WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2888         WARN_ON(cfq_cfqq_slice_new(cfqq));
2889
2890         /*
2891          * idle is disabled, either manually or by past process history
2892          */
2893         if (!cfq_should_idle(cfqd, cfqq)) {
2894                 /* no queue idling. Check for group idling */
2895                 if (cfqd->cfq_group_idle)
2896                         group_idle = cfqd->cfq_group_idle;
2897                 else
2898                         return;
2899         }
2900
2901         /*
2902          * still active requests from this queue, don't idle
2903          */
2904         if (cfqq->dispatched)
2905                 return;
2906
2907         /*
2908          * task has exited, don't wait
2909          */
2910         cic = cfqd->active_cic;
2911         if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2912                 return;
2913
2914         /*
2915          * If our average think time is larger than the remaining time
2916          * slice, then don't idle. This avoids overrunning the allotted
2917          * time slice.
2918          */
2919         if (sample_valid(cic->ttime.ttime_samples) &&
2920             (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2921                 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2922                              cic->ttime.ttime_mean);
2923                 return;
2924         }
2925
2926         /* There are other queues in the group, don't do group idle */
2927         if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2928                 return;
2929
2930         cfq_mark_cfqq_wait_request(cfqq);
2931
2932         if (group_idle)
2933                 sl = cfqd->cfq_group_idle;
2934         else
2935                 sl = cfqd->cfq_slice_idle;
2936
2937         mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2938         cfqg_stats_set_start_idle_time(cfqq->cfqg);
2939         cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2940                         group_idle ? 1 : 0);
2941 }
2942
2943 /*
2944  * Move request from internal lists to the request queue dispatch list.
2945  */
2946 static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2947 {
2948         struct cfq_data *cfqd = q->elevator->elevator_data;
2949         struct cfq_queue *cfqq = RQ_CFQQ(rq);
2950
2951         cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2952
2953         cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2954         cfq_remove_request(rq);
2955         cfqq->dispatched++;
2956         (RQ_CFQG(rq))->dispatched++;
2957         elv_dispatch_sort(q, rq);
2958
2959         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2960         cfqq->nr_sectors += blk_rq_sectors(rq);
2961 }
2962
2963 /*
2964  * return expired entry, or NULL to just start from scratch in rbtree
2965  */
2966 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2967 {
2968         struct request *rq = NULL;
2969
2970         if (cfq_cfqq_fifo_expire(cfqq))
2971                 return NULL;
2972
2973         cfq_mark_cfqq_fifo_expire(cfqq);
2974
2975         if (list_empty(&cfqq->fifo))
2976                 return NULL;
2977
2978         rq = rq_entry_fifo(cfqq->fifo.next);
2979         if (time_before(jiffies, rq->fifo_time))
2980                 rq = NULL;
2981
2982         cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2983         return rq;
2984 }
2985
2986 static inline int
2987 cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2988 {
2989         const int base_rq = cfqd->cfq_slice_async_rq;
2990
2991         WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2992
2993         return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2994 }
2995
2996 /*
2997  * Must be called with the queue_lock held.
2998  */
2999 static int cfqq_process_refs(struct cfq_queue *cfqq)
3000 {
3001         int process_refs, io_refs;
3002
3003         io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
3004         process_refs = cfqq->ref - io_refs;
3005         BUG_ON(process_refs < 0);
3006         return process_refs;
3007 }
3008
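/*
 * Schedule a merge of @cfqq and @new_cfqq by linking them through ->new_cfqq.
 * The queue with fewer process references is merged into the one with more,
 * and the merge target takes the extra references up front.
 */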
3009 static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3010 {
3011         int process_refs, new_process_refs;
3012         struct cfq_queue *__cfqq;
3013
3014         /*
3015          * If there are no process references on the new_cfqq, then it is
3016          * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3017          * chain may have dropped their last reference (not just their
3018          * last process reference).
3019          */
3020         if (!cfqq_process_refs(new_cfqq))
3021                 return;
3022
3023         /* Avoid a circular list and skip interim queue merges */
3024         while ((__cfqq = new_cfqq->new_cfqq)) {
3025                 if (__cfqq == cfqq)
3026                         return;
3027                 new_cfqq = __cfqq;
3028         }
3029
3030         process_refs = cfqq_process_refs(cfqq);
3031         new_process_refs = cfqq_process_refs(new_cfqq);
3032         /*
3033          * If the process for the cfqq has gone away, there is no
3034          * sense in merging the queues.
3035          */
3036         if (process_refs == 0 || new_process_refs == 0)
3037                 return;
3038
3039         /*
3040          * Merge in the direction of the lesser amount of work.
3041          */
3042         if (new_process_refs >= process_refs) {
3043                 cfqq->new_cfqq = new_cfqq;
3044                 new_cfqq->ref += process_refs;
3045         } else {
3046                 new_cfqq->new_cfqq = cfqq;
3047                 cfqq->ref += new_process_refs;
3048         }
3049 }
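/*
 * Illustrative example with assumed reference counts: if cfqq has 3
 * process references and new_cfqq has 5, then new_process_refs >=
 * process_refs, so cfqq->new_cfqq is pointed at new_cfqq and
 * new_cfqq->ref is bumped by 3.  The queue with fewer process
 * references is the one redirected, so less work has to move.
 */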
3050
3051 static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3052                         struct cfq_group *cfqg, enum wl_class_t wl_class)
3053 {
3054         struct cfq_queue *queue;
3055         int i;
3056         bool key_valid = false;
3057         unsigned long lowest_key = 0;
3058         enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3059
3060         for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3061                 /* select the one with lowest rb_key */
3062                 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
3063                 if (queue &&
3064                     (!key_valid || time_before(queue->rb_key, lowest_key))) {
3065                         lowest_key = queue->rb_key;
3066                         cur_best = i;
3067                         key_valid = true;
3068                 }
3069         }
3070
3071         return cur_best;
3072 }
3073
3074 static void
3075 choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
3076 {
3077         unsigned slice;
3078         unsigned count;
3079         struct cfq_rb_root *st;
3080         unsigned group_slice;
3081         enum wl_class_t original_class = cfqd->serving_wl_class;
3082
3083         /* Choose next priority. RT > BE > IDLE */
3084         if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
3085                 cfqd->serving_wl_class = RT_WORKLOAD;
3086         else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
3087                 cfqd->serving_wl_class = BE_WORKLOAD;
3088         else {
3089                 cfqd->serving_wl_class = IDLE_WORKLOAD;
3090                 cfqd->workload_expires = jiffies + 1;
3091                 return;
3092         }
3093
3094         if (original_class != cfqd->serving_wl_class)
3095                 goto new_workload;
3096
3097         /*
3098          * For RT and BE, we also have to choose the workload type
3099          * (SYNC, SYNC_NOIDLE, ASYNC) and to compute a workload
3100          * expiration time
3101          */
3102         st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3103         count = st->count;
3104
3105         /*
3106          * check workload expiration, and that we still have other queues ready
3107          */
3108         if (count && !time_after(jiffies, cfqd->workload_expires))
3109                 return;
3110
3111 new_workload:
3112         /* otherwise select new workload type */
3113         cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
3114                                         cfqd->serving_wl_class);
3115         st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3116         count = st->count;
3117
3118         /*
3119          * the workload slice is computed as a fraction of target latency
3120          * proportional to the number of queues in that workload, over
3121          * all the queues in the same priority class
3122          */
3123         group_slice = cfq_group_slice(cfqd, cfqg);
3124
3125         slice = group_slice * count /
3126                 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3127                       cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3128                                         cfqg));
3129
3130         if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
3131                 unsigned int tmp;
3132
3133                 /*
3134                  * Async queues are currently system wide. Just taking the
3135                  * proportion of queues within the same group will lead to a
3136                  * higher async ratio system wide, as the root group generally
3137                  * has a higher weight. A more accurate approach would be to
3138                  * calculate the system wide async/sync ratio.
3139                  */
3140                 tmp = cfqd->cfq_target_latency *
3141                         cfqg_busy_async_queues(cfqd, cfqg);
3142                 tmp = tmp/cfqd->busy_queues;
3143                 slice = min_t(unsigned, slice, tmp);
3144
3145                 /* async workload slice is scaled down according to
3146                  * the sync/async slice ratio. */
3147                 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
3148         } else
3149                 /* sync workload slice is at least 2 * cfq_slice_idle */
3150                 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3151
3152         slice = max_t(unsigned, slice, CFQ_MIN_TT);
3153         cfq_log(cfqd, "workload slice:%d", slice);
3154         cfqd->workload_expires = jiffies + slice;
3155 }
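/*
 * Worked example with assumed numbers (illustrative only): given a
 * group slice of 300 ms, 4 busy queues in the serving class and a
 * chosen workload type holding 2 of them, the workload slice is
 * 300 * 2 / 4 = 150 ms.  An ASYNC workload is further capped by the
 * group's share of busy async queues and scaled by
 * cfq_slice[0] / cfq_slice[1] (40 ms / 100 ms with the defaults),
 * while a sync workload never gets less than 2 * cfq_slice_idle.
 */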
3156
3157 static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3158 {
3159         struct cfq_rb_root *st = &cfqd->grp_service_tree;
3160         struct cfq_group *cfqg;
3161
3162         if (RB_EMPTY_ROOT(&st->rb))
3163                 return NULL;
3164         cfqg = cfq_rb_first_group(st);
3165         update_min_vdisktime(st);
3166         return cfqg;
3167 }
3168
3169 static void cfq_choose_cfqg(struct cfq_data *cfqd)
3170 {
3171         struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3172
3173         cfqd->serving_group = cfqg;
3174
3175         /* Restore the workload type data */
3176         if (cfqg->saved_wl_slice) {
3177                 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3178                 cfqd->serving_wl_type = cfqg->saved_wl_type;
3179                 cfqd->serving_wl_class = cfqg->saved_wl_class;
3180         } else
3181                 cfqd->workload_expires = jiffies - 1;
3182
3183         choose_wl_class_and_type(cfqd, cfqg);
3184 }
3185
3186 /*
3187  * Select a queue for service. If we have a current active queue,
3188  * check whether to continue servicing it, or retrieve and set a new one.
3189  */
3190 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3191 {
3192         struct cfq_queue *cfqq, *new_cfqq = NULL;
3193
3194         cfqq = cfqd->active_queue;
3195         if (!cfqq)
3196                 goto new_queue;
3197
3198         if (!cfqd->rq_queued)
3199                 return NULL;
3200
3201         /*
3202          * We were waiting for group to get backlogged. Expire the queue
3203          */
3204         if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3205                 goto expire;
3206
3207         /*
3208          * The active queue has run out of time, expire it and select new.
3209          */
3210         if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3211                 /*
3212                  * If the slice had not expired at the completion of the last
3213                  * request, we might not have turned on the wait_busy flag.
3214                  * Don't expire the queue yet; allow the group to get backlogged.
3215                  *
3216                  * The very fact that we have used up the slice means we have
3217                  * been idling all along on this queue, so it should be OK to
3218                  * wait for this request to complete.
3219                  */
3220                 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3221                     && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3222                         cfqq = NULL;
3223                         goto keep_queue;
3224                 } else
3225                         goto check_group_idle;
3226         }
3227
3228         /*
3229          * The active queue has requests and isn't expired, allow it to
3230          * dispatch.
3231          */
3232         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3233                 goto keep_queue;
3234
3235         /*
3236          * If another queue has a request waiting within our mean seek
3237          * distance, let it run.  The expire code will check for close
3238          * cooperators and put the close queue at the front of the service
3239          * tree.  If possible, merge the expiring queue with the new cfqq.
3240          */
3241         new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3242         if (new_cfqq) {
3243                 if (!cfqq->new_cfqq)
3244                         cfq_setup_merge(cfqq, new_cfqq);
3245                 goto expire;
3246         }
3247
3248         /*
3249          * No requests pending. If the active queue still has requests in
3250          * flight or is idling for a new request, allow either of these
3251          * conditions to happen (or time out) before selecting a new queue.
3252          */
3253         if (timer_pending(&cfqd->idle_slice_timer)) {
3254                 cfqq = NULL;
3255                 goto keep_queue;
3256         }
3257
3258         /*
3259          * This is a deep seek queue, but the device is much faster than
3260          * the queue can deliver requests, so don't idle
3261          */
3262         if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3263             (cfq_cfqq_slice_new(cfqq) ||
3264             (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3265                 cfq_clear_cfqq_deep(cfqq);
3266                 cfq_clear_cfqq_idle_window(cfqq);
3267         }
3268
3269         if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3270                 cfqq = NULL;
3271                 goto keep_queue;
3272         }
3273
3274         /*
3275          * If group idle is enabled and there are requests dispatched from
3276          * this group, wait for requests to complete.
3277          */
3278 check_group_idle:
3279         if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3280             cfqq->cfqg->dispatched &&
3281             !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3282                 cfqq = NULL;
3283                 goto keep_queue;
3284         }
3285
3286 expire:
3287         cfq_slice_expired(cfqd, 0);
3288 new_queue:
3289         /*
3290          * Current queue expired. Check if we have to switch to a new
3291          * service tree
3292          */
3293         if (!new_cfqq)
3294                 cfq_choose_cfqg(cfqd);
3295
3296         cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3297 keep_queue:
3298         return cfqq;
3299 }
3300
3301 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3302 {
3303         int dispatched = 0;
3304
3305         while (cfqq->next_rq) {
3306                 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3307                 dispatched++;
3308         }
3309
3310         BUG_ON(!list_empty(&cfqq->fifo));
3311
3312         /* By default cfqq is not expired if it is empty. Do it explicitly */
3313         __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3314         return dispatched;
3315 }
3316
3317 /*
3318  * Drain our current requests. Used for barriers and when switching
3319  * io schedulers on-the-fly.
3320  */
3321 static int cfq_forced_dispatch(struct cfq_data *cfqd)
3322 {
3323         struct cfq_queue *cfqq;
3324         int dispatched = 0;
3325
3326         /* Expire the timeslice of the current active queue first */
3327         cfq_slice_expired(cfqd, 0);
3328         while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3329                 __cfq_set_active_queue(cfqd, cfqq);
3330                 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3331         }
3332
3333         BUG_ON(cfqd->busy_queues);
3334
3335         cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3336         return dispatched;
3337 }
3338
3339 static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3340         struct cfq_queue *cfqq)
3341 {
3342         /* the queue hasn't finished any request, can't estimate */
3343         if (cfq_cfqq_slice_new(cfqq))
3344                 return true;
3345         if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3346                 cfqq->slice_end))
3347                 return true;
3348
3349         return false;
3350 }
3351
3352 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3353 {
3354         unsigned int max_dispatch;
3355
3356         /*
3357          * Drain async requests before we start sync IO
3358          */
3359         if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3360                 return false;
3361
3362         /*
3363          * If this is an async queue and we have sync IO in flight, let it wait
3364          */
3365         if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3366                 return false;
3367
3368         max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3369         if (cfq_class_idle(cfqq))
3370                 max_dispatch = 1;
3371
3372         /*
3373          * Does this cfqq already have too much IO in flight?
3374          */
3375         if (cfqq->dispatched >= max_dispatch) {
3376                 bool promote_sync = false;
3377                 /*
3378                  * idle queue must always only have a single IO in flight
3379                  */
3380                 if (cfq_class_idle(cfqq))
3381                         return false;
3382
3383                 /*
3384                  * If there is only one sync queue we can ignore the async
3385                  * queues here and give the sync queue no dispatch limit. The
3386                  * reason is that a sync queue can preempt an async queue, so
3387                  * limiting the sync queue doesn't make sense. This is useful
3388                  * for the aiostress test.
3389                  */
3390                 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3391                         promote_sync = true;
3392
3393                 /*
3394                  * We have other queues, don't allow more IO from this one
3395                  */
3396                 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3397                                 !promote_sync)
3398                         return false;
3399
3400                 /*
3401                  * Sole queue user, no limit
3402                  */
3403                 if (cfqd->busy_queues == 1 || promote_sync)
3404                         max_dispatch = -1;
3405                 else
3406                         /*
3407                          * Normally we start throttling cfqq when cfq_quantum/2
3408                          * requests have been dispatched. But we can drive
3409                          * deeper queue depths at the beginning of the slice,
3410                          * subject to the upper limit of cfq_quantum.
3411                          */
3412                         max_dispatch = cfqd->cfq_quantum;
3413         }
3414
3415         /*
3416          * Async queues must wait a bit before being allowed dispatch.
3417          * We also ramp up the dispatch depth gradually for async IO,
3418          * based on the last sync IO we serviced
3419          */
3420         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3421                 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3422                 unsigned int depth;
3423
3424                 depth = last_sync / cfqd->cfq_slice[1];
3425                 if (!depth && !cfqq->dispatched)
3426                         depth = 1;
3427                 if (depth < max_dispatch)
3428                         max_dispatch = depth;
3429         }
3430
3431         /*
3432          * If we're below the current max, allow a dispatch
3433          */
3434         return cfqq->dispatched < max_dispatch;
3435 }
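/*
 * Worked example of the async ramp-up above, assuming the defaults
 * (illustrative only): cfq_quantum = 8 gives a base max_dispatch of 4.
 * With cfq_latency set, if the last delayed sync completion was 200 ms
 * ago and cfq_slice[1] (slice_sync) is 100 ms, depth works out to
 * 200 / 100 = 2, so an async queue is held to 2 requests in flight
 * until sync IO has been quiet for longer.
 */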
3436
3437 /*
3438  * Dispatch a request from cfqq, moving it to the request queue
3439  * dispatch list.
3440  */
3441 static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3442 {
3443         struct request *rq;
3444
3445         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3446
3447         if (!cfq_may_dispatch(cfqd, cfqq))
3448                 return false;
3449
3450         /*
3451          * follow the expired path, else get the next available request
3452          */
3453         rq = cfq_check_fifo(cfqq);
3454         if (!rq)
3455                 rq = cfqq->next_rq;
3456
3457         /*
3458          * insert request into driver dispatch list
3459          */
3460         cfq_dispatch_insert(cfqd->queue, rq);
3461
3462         if (!cfqd->active_cic) {
3463                 struct cfq_io_cq *cic = RQ_CIC(rq);
3464
3465                 atomic_long_inc(&cic->icq.ioc->refcount);
3466                 cfqd->active_cic = cic;
3467         }
3468
3469         return true;
3470 }
3471
3472 /*
3473  * Find the cfqq that we need to service and move a request from that to the
3474  * dispatch list
3475  */
3476 static int cfq_dispatch_requests(struct request_queue *q, int force)
3477 {
3478         struct cfq_data *cfqd = q->elevator->elevator_data;
3479         struct cfq_queue *cfqq;
3480
3481         if (!cfqd->busy_queues)
3482                 return 0;
3483
3484         if (unlikely(force))
3485                 return cfq_forced_dispatch(cfqd);
3486
3487         cfqq = cfq_select_queue(cfqd);
3488         if (!cfqq)
3489                 return 0;
3490
3491         /*
3492          * Dispatch a request from this cfqq, if it is allowed
3493          */
3494         if (!cfq_dispatch_request(cfqd, cfqq))
3495                 return 0;
3496
3497         cfqq->slice_dispatch++;
3498         cfq_clear_cfqq_must_dispatch(cfqq);
3499
3500         /*
3501          * Expire an async queue immediately if it has used up its slice. Idle
3502          * queues always expire after one dispatch round.
3503          */
3504         if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3505             cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3506             cfq_class_idle(cfqq))) {
3507                 cfqq->slice_end = jiffies + 1;
3508                 cfq_slice_expired(cfqd, 0);
3509         }
3510
3511         cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3512         return 1;
3513 }
3514
3515 /*
3516  * The task holds one reference to the queue, dropped when the task exits. Each
3517  * rq in flight on this queue also holds a reference, dropped when the rq is freed.
3518  *
3519  * Each cfq queue took a reference on the parent group. Drop it now.
3520  * The queue lock must be held here.
3521  */
3522 static void cfq_put_queue(struct cfq_queue *cfqq)
3523 {
3524         struct cfq_data *cfqd = cfqq->cfqd;
3525         struct cfq_group *cfqg;
3526
3527         BUG_ON(cfqq->ref <= 0);
3528
3529         cfqq->ref--;
3530         if (cfqq->ref)
3531                 return;
3532
3533         cfq_log_cfqq(cfqd, cfqq, "put_queue");
3534         BUG_ON(rb_first(&cfqq->sort_list));
3535         BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3536         cfqg = cfqq->cfqg;
3537
3538         if (unlikely(cfqd->active_queue == cfqq)) {
3539                 __cfq_slice_expired(cfqd, cfqq, 0);
3540                 cfq_schedule_dispatch(cfqd);
3541         }
3542
3543         BUG_ON(cfq_cfqq_on_rr(cfqq));
3544         kmem_cache_free(cfq_pool, cfqq);
3545         cfqg_put(cfqg);
3546 }
3547
3548 static void cfq_put_cooperator(struct cfq_queue *cfqq)
3549 {
3550         struct cfq_queue *__cfqq, *next;
3551
3552         /*
3553          * If this queue was scheduled to merge with another queue, be
3554          * sure to drop the reference taken on that queue (and others in
3555          * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3556          */
3557         __cfqq = cfqq->new_cfqq;
3558         while (__cfqq) {
3559                 if (__cfqq == cfqq) {
3560                         WARN(1, "cfqq->new_cfqq loop detected\n");
3561                         break;
3562                 }
3563                 next = __cfqq->new_cfqq;
3564                 cfq_put_queue(__cfqq);
3565                 __cfqq = next;
3566         }
3567 }
3568
3569 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3570 {
3571         if (unlikely(cfqq == cfqd->active_queue)) {
3572                 __cfq_slice_expired(cfqd, cfqq, 0);
3573                 cfq_schedule_dispatch(cfqd);
3574         }
3575
3576         cfq_put_cooperator(cfqq);
3577
3578         cfq_put_queue(cfqq);
3579 }
3580
3581 static void cfq_init_icq(struct io_cq *icq)
3582 {
3583         struct cfq_io_cq *cic = icq_to_cic(icq);
3584
3585         cic->ttime.last_end_request = jiffies;
3586 }
3587
3588 static void cfq_exit_icq(struct io_cq *icq)
3589 {
3590         struct cfq_io_cq *cic = icq_to_cic(icq);
3591         struct cfq_data *cfqd = cic_to_cfqd(cic);
3592
3593         if (cic_to_cfqq(cic, false)) {
3594                 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3595                 cic_set_cfqq(cic, NULL, false);
3596         }
3597
3598         if (cic_to_cfqq(cic, true)) {
3599                 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3600                 cic_set_cfqq(cic, NULL, true);
3601         }
3602 }
3603
3604 static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3605 {
3606         struct task_struct *tsk = current;
3607         int ioprio_class;
3608
3609         if (!cfq_cfqq_prio_changed(cfqq))
3610                 return;
3611
3612         ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3613         switch (ioprio_class) {
3614         default:
3615                 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3616         case IOPRIO_CLASS_NONE:
3617                 /*
3618                  * no prio set, inherit CPU scheduling settings
3619                  */
3620                 cfqq->ioprio = task_nice_ioprio(tsk);
3621                 cfqq->ioprio_class = task_nice_ioclass(tsk);
3622                 break;
3623         case IOPRIO_CLASS_RT:
3624                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3625                 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3626                 break;
3627         case IOPRIO_CLASS_BE:
3628                 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3629                 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3630                 break;
3631         case IOPRIO_CLASS_IDLE:
3632                 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3633                 cfqq->ioprio = 7;
3634                 cfq_clear_cfqq_idle_window(cfqq);
3635                 break;
3636         }
3637
3638         /*
3639          * keep track of original prio settings in case we have to temporarily
3640          * elevate the priority of this queue
3641          */
3642         cfqq->org_ioprio = cfqq->ioprio;
3643         cfq_clear_cfqq_prio_changed(cfqq);
3644 }
3645
3646 static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3647 {
3648         int ioprio = cic->icq.ioc->ioprio;
3649         struct cfq_data *cfqd = cic_to_cfqd(cic);
3650         struct cfq_queue *cfqq;
3651
3652         /*
3653          * Check whether ioprio has changed.  The condition may trigger
3654          * spuriously on a newly created cic but there's no harm.
3655          */
3656         if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3657                 return;
3658
3659         cfqq = cic_to_cfqq(cic, false);
3660         if (cfqq) {
3661                 cfq_put_queue(cfqq);
3662                 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
3663                 cic_set_cfqq(cic, cfqq, false);
3664         }
3665
3666         cfqq = cic_to_cfqq(cic, true);
3667         if (cfqq)
3668                 cfq_mark_cfqq_prio_changed(cfqq);
3669
3670         cic->ioprio = ioprio;
3671 }
3672
3673 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3674                           pid_t pid, bool is_sync)
3675 {
3676         RB_CLEAR_NODE(&cfqq->rb_node);
3677         RB_CLEAR_NODE(&cfqq->p_node);
3678         INIT_LIST_HEAD(&cfqq->fifo);
3679
3680         cfqq->ref = 0;
3681         cfqq->cfqd = cfqd;
3682
3683         cfq_mark_cfqq_prio_changed(cfqq);
3684
3685         if (is_sync) {
3686                 if (!cfq_class_idle(cfqq))
3687                         cfq_mark_cfqq_idle_window(cfqq);
3688                 cfq_mark_cfqq_sync(cfqq);
3689         }
3690         cfqq->pid = pid;
3691 }
3692
3693 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3694 static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3695 {
3696         struct cfq_data *cfqd = cic_to_cfqd(cic);
3697         struct cfq_queue *cfqq;
3698         uint64_t serial_nr;
3699
3700         rcu_read_lock();
3701         serial_nr = bio_blkcg(bio)->css.serial_nr;
3702         rcu_read_unlock();
3703
3704         /*
3705          * Check whether blkcg has changed.  The condition may trigger
3706          * spuriously on a newly created cic but there's no harm.
3707          */
3708         if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3709                 return;
3710
3711         /*
3712          * Drop reference to queues.  New queues will be assigned in new
3713          * group upon arrival of fresh requests.
3714          */
3715         cfqq = cic_to_cfqq(cic, false);
3716         if (cfqq) {
3717                 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3718                 cic_set_cfqq(cic, NULL, false);
3719                 cfq_put_queue(cfqq);
3720         }
3721
3722         cfqq = cic_to_cfqq(cic, true);
3723         if (cfqq) {
3724                 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3725                 cic_set_cfqq(cic, NULL, true);
3726                 cfq_put_queue(cfqq);
3727         }
3728
3729         cic->blkcg_serial_nr = serial_nr;
3730 }
3731 #else
3732 static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3733 #endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3734
3735 static struct cfq_queue **
3736 cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
3737 {
3738         switch (ioprio_class) {
3739         case IOPRIO_CLASS_RT:
3740                 return &cfqg->async_cfqq[0][ioprio];
3741         case IOPRIO_CLASS_NONE:
3742                 ioprio = IOPRIO_NORM;
3743                 /* fall through */
3744         case IOPRIO_CLASS_BE:
3745                 return &cfqg->async_cfqq[1][ioprio];
3746         case IOPRIO_CLASS_IDLE:
3747                 return &cfqg->async_idle_cfqq;
3748         default:
3749                 BUG();
3750         }
3751 }
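/*
 * Indexing example (illustrative): a best-effort task at priority 4
 * shares &cfqg->async_cfqq[1][4] within its group; IOPRIO_CLASS_NONE
 * maps to the same slot as BE with IOPRIO_NORM, RT priorities use
 * row 0, and the idle class shares a single async_idle_cfqq per group.
 */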
3752
3753 static struct cfq_queue *
3754 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3755               struct bio *bio)
3756 {
3757         int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3758         int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3759         struct cfq_queue **async_cfqq = NULL;
3760         struct cfq_queue *cfqq;
3761         struct cfq_group *cfqg;
3762
3763         rcu_read_lock();
3764         cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
3765         if (!cfqg) {
3766                 cfqq = &cfqd->oom_cfqq;
3767                 goto out;
3768         }
3769
3770         if (!is_sync) {
3771                 if (!ioprio_valid(cic->ioprio)) {
3772                         struct task_struct *tsk = current;
3773                         ioprio = task_nice_ioprio(tsk);
3774                         ioprio_class = task_nice_ioclass(tsk);
3775                 }
3776                 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
3777                 cfqq = *async_cfqq;
3778                 if (cfqq)
3779                         goto out;
3780         }
3781
3782         cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
3783                                      cfqd->queue->node);
3784         if (!cfqq) {
3785                 cfqq = &cfqd->oom_cfqq;
3786                 goto out;
3787         }
3788
3789         cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3790         cfq_init_prio_data(cfqq, cic);
3791         cfq_link_cfqq_cfqg(cfqq, cfqg);
3792         cfq_log_cfqq(cfqd, cfqq, "alloced");
3793
3794         if (async_cfqq) {
3795                 /* a new async queue is created, pin and remember */
3796                 cfqq->ref++;
3797                 *async_cfqq = cfqq;
3798         }
3799 out:
3800         cfqq->ref++;
3801         rcu_read_unlock();
3802         return cfqq;
3803 }
3804
3805 static void
3806 __cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3807 {
3808         unsigned long elapsed = jiffies - ttime->last_end_request;
3809         elapsed = min(elapsed, 2UL * slice_idle);
3810
3811         ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3812         ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3813         ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3814 }
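/*
 * Sketch of the fixed-point averaging above (illustrative): the sample
 * count converges to 256, since s = (7*s + 256) / 8 has fixed point
 * s = 256.  With a constant think time E the total converges to
 * 256 * E, so ttime_mean ~= (256*E + 128) / 256 ~= E.  Because elapsed
 * is clamped to 2 * slice_idle first, a single long gap cannot blow up
 * the mean.
 */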
3815
3816 static void
3817 cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3818                         struct cfq_io_cq *cic)
3819 {
3820         if (cfq_cfqq_sync(cfqq)) {
3821                 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3822                 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3823                         cfqd->cfq_slice_idle);
3824         }
3825 #ifdef CONFIG_CFQ_GROUP_IOSCHED
3826         __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3827 #endif
3828 }
3829
3830 static void
3831 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3832                        struct request *rq)
3833 {
3834         sector_t sdist = 0;
3835         sector_t n_sec = blk_rq_sectors(rq);
3836         if (cfqq->last_request_pos) {
3837                 if (cfqq->last_request_pos < blk_rq_pos(rq))
3838                         sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3839                 else
3840                         sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3841         }
3842
3843         cfqq->seek_history <<= 1;
3844         if (blk_queue_nonrot(cfqd->queue))
3845                 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3846         else
3847                 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3848 }
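/*
 * Illustration of the heuristic above: seek_history is a 32-entry
 * shift register.  On rotational devices a bit is set when a request
 * lands more than CFQQ_SEEK_THR (800 sectors) from the previous one;
 * on non-rotational devices a bit is set for small requests (under
 * CFQQ_SECT_THR_NONROT = 64 sectors).  CFQQ_SEEKY() then flags the
 * queue once more than 32/8 = 4 of the last 32 requests are marked,
 * i.e. roughly one in eight.
 */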
3849
3850 /*
3851  * Disable idle window if the process thinks too long or seeks so much that
3852  * it doesn't matter
3853  */
3854 static void
3855 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3856                        struct cfq_io_cq *cic)
3857 {
3858         int old_idle, enable_idle;
3859
3860         /*
3861          * Don't idle for async or idle io prio class
3862          */
3863         if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3864                 return;
3865
3866         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3867
3868         if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3869                 cfq_mark_cfqq_deep(cfqq);
3870
3871         if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3872                 enable_idle = 0;
3873         else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3874                  !cfqd->cfq_slice_idle ||
3875                  (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3876                 enable_idle = 0;
3877         else if (sample_valid(cic->ttime.ttime_samples)) {
3878                 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3879                         enable_idle = 0;
3880                 else
3881                         enable_idle = 1;
3882         }
3883
3884         if (old_idle != enable_idle) {
3885                 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3886                 if (enable_idle)
3887                         cfq_mark_cfqq_idle_window(cfqq);
3888                 else
3889                         cfq_clear_cfqq_idle_window(cfqq);
3890         }
3891 }
3892
3893 /*
3894  * Check if new_cfqq should preempt the currently active queue. Return false
3895  * for no (or if we aren't sure); returning true will cause a preempt.
3896  */
3897 static bool
3898 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3899                    struct request *rq)
3900 {
3901         struct cfq_queue *cfqq;
3902
3903         cfqq = cfqd->active_queue;
3904         if (!cfqq)
3905                 return false;
3906
3907         if (cfq_class_idle(new_cfqq))
3908                 return false;
3909
3910         if (cfq_class_idle(cfqq))
3911                 return true;
3912
3913         /*
3914          * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3915          */
3916         if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3917                 return false;
3918
3919         /*
3920          * if the new request is sync, but the currently running queue is
3921          * not, let the sync request have priority.
3922          */
3923         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3924                 return true;
3925
3926         if (new_cfqq->cfqg != cfqq->cfqg)
3927                 return false;
3928
3929         if (cfq_slice_used(cfqq))
3930                 return true;
3931
3932         /* Allow preemption only if we are idling on sync-noidle tree */
3933         if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3934             cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3935             new_cfqq->service_tree->count == 2 &&
3936             RB_EMPTY_ROOT(&cfqq->sort_list))
3937                 return true;
3938
3939         /*
3940          * So both queues are sync. Let the new request get disk time if
3941          * it's a metadata request and the current queue is doing regular IO.
3942          */
3943         if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3944                 return true;
3945
3946         /*
3947          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3948          */
3949         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3950                 return true;
3951
3952         /* The active queue is empty and should not be idling, so preempt it */
3953         if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3954                 return true;
3955
3956         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3957                 return false;
3958
3959         /*
3960          * if this request is as-good as one we would expect from the
3961          * current cfqq, let it preempt
3962          */
3963         if (cfq_rq_close(cfqd, cfqq, rq))
3964                 return true;
3965
3966         return false;
3967 }
3968
3969 /*
3970  * cfqq preempts the active queue. if we allowed preempt with no slice left,
3971  * let it have half of its nominal slice.
3972  */
3973 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3974 {
3975         enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3976
3977         cfq_log_cfqq(cfqd, cfqq, "preempt");
3978         cfq_slice_expired(cfqd, 1);
3979
3980         /*
3981          * The workload type has changed; don't save the slice, otherwise the
3982          * preempt won't take effect.
3983          */
3984         if (old_type != cfqq_type(cfqq))
3985                 cfqq->cfqg->saved_wl_slice = 0;
3986
3987         /*
3988          * Put the new queue at the front of the current list,
3989          * so we know that it will be selected next.
3990          */
3991         BUG_ON(!cfq_cfqq_on_rr(cfqq));
3992
3993         cfq_service_tree_add(cfqd, cfqq, 1);
3994
3995         cfqq->slice_end = 0;
3996         cfq_mark_cfqq_slice_new(cfqq);
3997 }
3998
3999 /*
4000  * Called when a new fs request (rq) is added (to cfqq). Check if there's
4001  * something we should do about it
4002  */
4003 static void
4004 cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4005                 struct request *rq)
4006 {
4007         struct cfq_io_cq *cic = RQ_CIC(rq);
4008
4009         cfqd->rq_queued++;
4010         if (rq->cmd_flags & REQ_PRIO)
4011                 cfqq->prio_pending++;
4012
4013         cfq_update_io_thinktime(cfqd, cfqq, cic);
4014         cfq_update_io_seektime(cfqd, cfqq, rq);
4015         cfq_update_idle_window(cfqd, cfqq, cic);
4016
4017         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4018
4019         if (cfqq == cfqd->active_queue) {
4020                 /*
4021                  * Remember that we saw a request from this process, but
4022                  * don't start queuing just yet. Otherwise we risk seeing lots
4023                  * of tiny requests, because we disrupt the normal plugging
4024                  * and merging. If the request is already larger than a single
4025                  * page, let it rip immediately. For that case we assume that
4026                  * merging is already done. Ditto for a busy system that
4027                  * has other work pending; don't risk delaying work until the
4028                  * idle timer unplugs.
4029                  */
4030                 if (cfq_cfqq_wait_request(cfqq)) {
4031                         if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
4032                             cfqd->busy_queues > 1) {
4033                                 cfq_del_timer(cfqd, cfqq);
4034                                 cfq_clear_cfqq_wait_request(cfqq);
4035                                 __blk_run_queue(cfqd->queue);
4036                         } else {
4037                                 cfqg_stats_update_idle_time(cfqq->cfqg);
4038                                 cfq_mark_cfqq_must_dispatch(cfqq);
4039                         }
4040                 }
4041         } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
4042                 /*
4043                  * not the active queue - expire the current slice if it is
4044                  * idle and has expired its mean thinktime, or this new queue
4045                  * has some old slice time left and is of higher priority, or
4046                  * this new queue is RT and the current one is BE
4047                  */
4048                 cfq_preempt_queue(cfqd, cfqq);
4049                 __blk_run_queue(cfqd->queue);
4050         }
4051 }
4052
4053 static void cfq_insert_request(struct request_queue *q, struct request *rq)
4054 {
4055         struct cfq_data *cfqd = q->elevator->elevator_data;
4056         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4057
4058         cfq_log_cfqq(cfqd, cfqq, "insert_request");
4059         cfq_init_prio_data(cfqq, RQ_CIC(rq));
4060
4061         rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
4062         list_add_tail(&rq->queuelist, &cfqq->fifo);
4063         cfq_add_rq_rb(rq);
4064         cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
4065                                  rq->cmd_flags);
4066         cfq_rq_enqueued(cfqd, cfqq, rq);
4067 }
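/*
 * With the defaults at the top of this file, fifo_time works out to
 * "now + 125 ms" for sync requests (cfq_fifo_expire[1] = HZ/8) and
 * "now + 250 ms" for async ones (cfq_fifo_expire[0] = HZ/4);
 * cfq_check_fifo() uses that stamp to dispatch a request that has
 * waited too long ahead of the sort-tree order.
 */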
4068
4069 /*
4070  * Update hw_tag based on peak queue depth over 50 samples under
4071  * sufficient load.
4072  */
4073 static void cfq_update_hw_tag(struct cfq_data *cfqd)
4074 {
4075         struct cfq_queue *cfqq = cfqd->active_queue;
4076
4077         if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4078                 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
4079
4080         if (cfqd->hw_tag == 1)
4081                 return;
4082
4083         if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
4084             cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
4085                 return;
4086
4087         /*
4088          * If the active queue doesn't have enough requests and can idle, cfq
4089          * might not dispatch sufficient requests to the hardware. Don't zero
4090          * hw_tag in this case.
4091          */
4092         if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4093             cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
4094             CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
4095                 return;
4096
4097         if (cfqd->hw_tag_samples++ < 50)
4098                 return;
4099
4100         if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
4101                 cfqd->hw_tag = 1;
4102         else
4103                 cfqd->hw_tag = 0;
4104 }
4105
4106 static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4107 {
4108         struct cfq_io_cq *cic = cfqd->active_cic;
4109
4110         /* If the queue already has requests, don't wait */
4111         if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4112                 return false;
4113
4114         /* If there are other queues in the group, don't wait */
4115         if (cfqq->cfqg->nr_cfqq > 1)
4116                 return false;
4117
4118         /* the only queue in the group, but think time is big */
4119         if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4120                 return false;
4121
4122         if (cfq_slice_used(cfqq))
4123                 return true;
4124
4125         /* if slice left is less than think time, wait busy */
4126         if (cic && sample_valid(cic->ttime.ttime_samples)
4127             && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
4128                 return true;
4129
4130         /*
4131          * If the think time is less than a jiffy then ttime_mean=0 and the
4132          * above will not be true. It might happen that the slice has not
4133          * expired yet but will expire soon (4-5 ns) during select_queue().
4134          * To cover the case where the think time is less than a jiffy, mark
4135          * the queue wait busy if only 1 jiffy is left in the slice.
4136          */
4137         if (cfqq->slice_end - jiffies == 1)
4138                 return true;
4139
4140         return false;
4141 }
4142
4143 static void cfq_completed_request(struct request_queue *q, struct request *rq)
4144 {
4145         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4146         struct cfq_data *cfqd = cfqq->cfqd;
4147         const int sync = rq_is_sync(rq);
4148         unsigned long now;
4149
4150         now = jiffies;
4151         cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4152                      !!(rq->cmd_flags & REQ_NOIDLE));
4153
4154         cfq_update_hw_tag(cfqd);
4155
4156         WARN_ON(!cfqd->rq_in_driver);
4157         WARN_ON(!cfqq->dispatched);
4158         cfqd->rq_in_driver--;
4159         cfqq->dispatched--;
4160         (RQ_CFQG(rq))->dispatched--;
4161         cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4162                                      rq_io_start_time_ns(rq), rq->cmd_flags);
4163
4164         cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4165
4166         if (sync) {
4167                 struct cfq_rb_root *st;
4168
4169                 RQ_CIC(rq)->ttime.last_end_request = now;
4170
4171                 if (cfq_cfqq_on_rr(cfqq))
4172                         st = cfqq->service_tree;
4173                 else
4174                         st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4175                                         cfqq_type(cfqq));
4176
4177                 st->ttime.last_end_request = now;
4178                 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4179                         cfqd->last_delayed_sync = now;
4180         }
4181
4182 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4183         cfqq->cfqg->ttime.last_end_request = now;
4184 #endif
4185
4186         /*
4187          * If this is the active queue, check if it needs to be expired,
4188          * or if we want to idle in case it has no pending requests.
4189          */
4190         if (cfqd->active_queue == cfqq) {
4191                 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4192
4193                 if (cfq_cfqq_slice_new(cfqq)) {
4194                         cfq_set_prio_slice(cfqd, cfqq);
4195                         cfq_clear_cfqq_slice_new(cfqq);
4196                 }
4197
4198                 /*
4199                  * Should we wait for the next request to come in before we
4200                  * expire the queue?
4201                  */
4202                 if (cfq_should_wait_busy(cfqd, cfqq)) {
4203                         unsigned long extend_sl = cfqd->cfq_slice_idle;
4204                         if (!cfqd->cfq_slice_idle)
4205                                 extend_sl = cfqd->cfq_group_idle;
4206                         cfqq->slice_end = jiffies + extend_sl;
4207                         cfq_mark_cfqq_wait_busy(cfqq);
4208                         cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4209                 }
4210
4211                 /*
4212                  * Idling is not enabled on:
4213                  * - expired queues
4214                  * - idle-priority queues
4215                  * - async queues
4216                  * - queues that still have requests queued
4217                  * - when there is a close cooperator
4218                  */
4219                 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4220                         cfq_slice_expired(cfqd, 1);
4221                 else if (sync && cfqq_empty &&
4222                          !cfq_close_cooperator(cfqd, cfqq)) {
4223                         cfq_arm_slice_timer(cfqd);
4224                 }
4225         }
4226
4227         if (!cfqd->rq_in_driver)
4228                 cfq_schedule_dispatch(cfqd);
4229 }
4230
4231 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4232 {
4233         if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4234                 cfq_mark_cfqq_must_alloc_slice(cfqq);
4235                 return ELV_MQUEUE_MUST;
4236         }
4237
4238         return ELV_MQUEUE_MAY;
4239 }
4240
4241 static int cfq_may_queue(struct request_queue *q, int rw)
4242 {
4243         struct cfq_data *cfqd = q->elevator->elevator_data;
4244         struct task_struct *tsk = current;
4245         struct cfq_io_cq *cic;
4246         struct cfq_queue *cfqq;
4247
4248         /*
4249          * don't force setup of a queue from here, as a call to may_queue
4250          * does not necessarily imply that a request actually will be queued.
4251          * so just lookup a possibly existing queue, or return 'may queue'
4252          * if that fails
4253          */
4254         cic = cfq_cic_lookup(cfqd, tsk->io_context);
4255         if (!cic)
4256                 return ELV_MQUEUE_MAY;
4257
4258         cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4259         if (cfqq) {
4260                 cfq_init_prio_data(cfqq, cic);
4261
4262                 return __cfq_may_queue(cfqq);
4263         }
4264
4265         return ELV_MQUEUE_MAY;
4266 }
4267
4268 /*
4269  * queue lock held here
4270  */
4271 static void cfq_put_request(struct request *rq)
4272 {
4273         struct cfq_queue *cfqq = RQ_CFQQ(rq);
4274
4275         if (cfqq) {
4276                 const int rw = rq_data_dir(rq);
4277
4278                 BUG_ON(!cfqq->allocated[rw]);
4279                 cfqq->allocated[rw]--;
4280
4281                 /* Put down rq reference on cfqg */
4282                 cfqg_put(RQ_CFQG(rq));
4283                 rq->elv.priv[0] = NULL;
4284                 rq->elv.priv[1] = NULL;
4285
4286                 cfq_put_queue(cfqq);
4287         }
4288 }
4289
4290 static struct cfq_queue *
4291 cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4292                 struct cfq_queue *cfqq)
4293 {
4294         cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4295         cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4296         cfq_mark_cfqq_coop(cfqq->new_cfqq);
4297         cfq_put_queue(cfqq);
4298         return cic_to_cfqq(cic, 1);
4299 }
4300
4301 /*
4302  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4303  * was the last process referring to said cfqq.
4304  */
4305 static struct cfq_queue *
4306 split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4307 {
4308         if (cfqq_process_refs(cfqq) == 1) {
4309                 cfqq->pid = current->pid;
4310                 cfq_clear_cfqq_coop(cfqq);
4311                 cfq_clear_cfqq_split_coop(cfqq);
4312                 return cfqq;
4313         }
4314
4315         cic_set_cfqq(cic, NULL, 1);
4316
4317         cfq_put_cooperator(cfqq);
4318
4319         cfq_put_queue(cfqq);
4320         return NULL;
4321 }
4322 /*
4323  * Allocate cfq data structures associated with this request.
4324  */
4325 static int
4326 cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4327                 gfp_t gfp_mask)
4328 {
4329         struct cfq_data *cfqd = q->elevator->elevator_data;
4330         struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4331         const int rw = rq_data_dir(rq);
4332         const bool is_sync = rq_is_sync(rq);
4333         struct cfq_queue *cfqq;
4334
4335         spin_lock_irq(q->queue_lock);
4336
4337         check_ioprio_changed(cic, bio);
4338         check_blkcg_changed(cic, bio);
4339 new_queue:
4340         cfqq = cic_to_cfqq(cic, is_sync);
4341         if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4342                 if (cfqq)
4343                         cfq_put_queue(cfqq);
4344                 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
4345                 cic_set_cfqq(cic, cfqq, is_sync);
4346         } else {
4347                 /*
4348                  * If the queue was seeky for too long, break it apart.
4349                  */
4350                 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4351                         cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4352                         cfqq = split_cfqq(cic, cfqq);
4353                         if (!cfqq)
4354                                 goto new_queue;
4355                 }
4356
4357                 /*
4358                  * Check to see if this queue is scheduled to merge with
4359                  * another, closely cooperating queue.  The merging of
4360                  * queues happens here as it must be done in process context.
4361                  * The reference on new_cfqq was taken in merge_cfqqs.
4362                  */
4363                 if (cfqq->new_cfqq)
4364                         cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4365         }
4366
4367         cfqq->allocated[rw]++;
4368
4369         cfqq->ref++;
4370         cfqg_get(cfqq->cfqg);
4371         rq->elv.priv[0] = cfqq;
4372         rq->elv.priv[1] = cfqq->cfqg;
4373         spin_unlock_irq(q->queue_lock);
4374         return 0;
4375 }
4376
4377 static void cfq_kick_queue(struct work_struct *work)
4378 {
4379         struct cfq_data *cfqd =
4380                 container_of(work, struct cfq_data, unplug_work);
4381         struct request_queue *q = cfqd->queue;
4382
4383         spin_lock_irq(q->queue_lock);
4384         __blk_run_queue(cfqd->queue);
4385         spin_unlock_irq(q->queue_lock);
4386 }
4387
4388 /*
4389  * Timer running if the active_queue is currently idling inside its time slice
4390  */
4391 static void cfq_idle_slice_timer(unsigned long data)
4392 {
4393         struct cfq_data *cfqd = (struct cfq_data *) data;
4394         struct cfq_queue *cfqq;
4395         unsigned long flags;
4396         int timed_out = 1;
4397
4398         cfq_log(cfqd, "idle timer fired");
4399
4400         spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4401
4402         cfqq = cfqd->active_queue;
4403         if (cfqq) {
4404                 timed_out = 0;
4405
4406                 /*
4407                  * We saw a request before the queue expired, let it through
4408                  */
4409                 if (cfq_cfqq_must_dispatch(cfqq))
4410                         goto out_kick;
4411
4412                 /*
4413                  * expired
4414                  */
4415                 if (cfq_slice_used(cfqq))
4416                         goto expire;
4417
4418                 /*
4419                  * only expire and reinvoke the request handler if there are
4420                  * other queues with pending requests
4421                  */
4422                 if (!cfqd->busy_queues)
4423                         goto out_cont;
4424
4425                 /*
4426                  * not expired and it has a request pending, let it dispatch
4427                  */
4428                 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4429                         goto out_kick;
4430
4431                 /*
4432                  * Queue depth flag is reset only when the idle didn't succeed
4433                  */
4434                 cfq_clear_cfqq_deep(cfqq);
4435         }
4436 expire:
4437         cfq_slice_expired(cfqd, timed_out);
4438 out_kick:
4439         cfq_schedule_dispatch(cfqd);
4440 out_cont:
4441         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4442 }
4443
4444 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4445 {
4446         del_timer_sync(&cfqd->idle_slice_timer);
4447         cancel_work_sync(&cfqd->unplug_work);
4448 }
4449
4450 static void cfq_exit_queue(struct elevator_queue *e)
4451 {
4452         struct cfq_data *cfqd = e->elevator_data;
4453         struct request_queue *q = cfqd->queue;
4454
4455         cfq_shutdown_timer_wq(cfqd);
4456
4457         spin_lock_irq(q->queue_lock);
4458
4459         if (cfqd->active_queue)
4460                 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4461
4462         spin_unlock_irq(q->queue_lock);
4463
4464         cfq_shutdown_timer_wq(cfqd);
4465
4466 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4467         blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4468 #else
4469         kfree(cfqd->root_group);
4470 #endif
4471         kfree(cfqd);
4472 }
4473
4474 static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4475 {
4476         struct cfq_data *cfqd;
4477         struct blkcg_gq *blkg __maybe_unused;
4478         int i, ret;
4479         struct elevator_queue *eq;
4480
4481         eq = elevator_alloc(q, e);
4482         if (!eq)
4483                 return -ENOMEM;
4484
4485         cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4486         if (!cfqd) {
4487                 kobject_put(&eq->kobj);
4488                 return -ENOMEM;
4489         }
4490         eq->elevator_data = cfqd;
4491
4492         cfqd->queue = q;
4493         spin_lock_irq(q->queue_lock);
4494         q->elevator = eq;
4495         spin_unlock_irq(q->queue_lock);
4496
4497         /* Init root service tree */
4498         cfqd->grp_service_tree = CFQ_RB_ROOT;
4499
4500         /* Init root group and prefer root group over other groups by default */
4501 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4502         ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4503         if (ret)
4504                 goto out_free;
4505
4506         cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4507 #else
4508         ret = -ENOMEM;
4509         cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4510                                         GFP_KERNEL, cfqd->queue->node);
4511         if (!cfqd->root_group)
4512                 goto out_free;
4513
4514         cfq_init_cfqg_base(cfqd->root_group);
4515 #endif
4516         cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4517         cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4518
4519         /*
4520          * Not strictly needed (since RB_ROOT just clears the node and we
4521          * zeroed cfqd on alloc), but better be safe in case someone decides
4522          * to add magic to the rb code
4523          */
4524         for (i = 0; i < CFQ_PRIO_LISTS; i++)
4525                 cfqd->prio_trees[i] = RB_ROOT;
4526
4527         /*
4528          * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
4529          * Grab a permanent reference to it, so that the normal code flow
4530          * will not attempt to free it.  oom_cfqq is linked to root_group
4531          * but shouldn't hold a reference as it'll never be unlinked.  Lose
4532          * the reference from linking right away.
4533          */
4534         cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4535         cfqd->oom_cfqq.ref++;
4536
4537         spin_lock_irq(q->queue_lock);
4538         cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4539         cfqg_put(cfqd->root_group);
4540         spin_unlock_irq(q->queue_lock);
4541
4542         init_timer(&cfqd->idle_slice_timer);
4543         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4544         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4545
4546         INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4547
4548         cfqd->cfq_quantum = cfq_quantum;
4549         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4550         cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4551         cfqd->cfq_back_max = cfq_back_max;
4552         cfqd->cfq_back_penalty = cfq_back_penalty;
4553         cfqd->cfq_slice[0] = cfq_slice_async;
4554         cfqd->cfq_slice[1] = cfq_slice_sync;
4555         cfqd->cfq_target_latency = cfq_target_latency;
4556         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4557         cfqd->cfq_slice_idle = cfq_slice_idle;
4558         cfqd->cfq_group_idle = cfq_group_idle;
4559         cfqd->cfq_latency = 1;
4560         cfqd->hw_tag = -1;
4561         /*
4562          * we optimistically start by assuming sync ops weren't delayed in the
4563          * last second, in order to allow a larger depth for async operations.
4564          */
4565         cfqd->last_delayed_sync = jiffies - HZ;
4566         return 0;
4567
4568 out_free:
4569         kfree(cfqd);
4570         kobject_put(&eq->kobj);
4571         return ret;
4572 }
4573
4574 static void cfq_registered_queue(struct request_queue *q)
4575 {
4576         struct elevator_queue *e = q->elevator;
4577         struct cfq_data *cfqd = e->elevator_data;
4578
4579         /*
4580          * Default to IOPS mode with no idling for SSDs
4581          */
4582         if (blk_queue_nonrot(q))
4583                 cfqd->cfq_slice_idle = 0;
4584 }
4585
4586 /*
4587  * sysfs parts below -->
4588  */
4589 static ssize_t
4590 cfq_var_show(unsigned int var, char *page)
4591 {
4592         return sprintf(page, "%u\n", var);
4593 }
4594
4595 static ssize_t
4596 cfq_var_store(unsigned int *var, const char *page, size_t count)
4597 {
4598         char *p = (char *) page;
4599
4600         *var = simple_strtoul(p, &p, 10);
4601         return count;
4602 }
4603
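/*
 * cfq_var_show()/cfq_var_store() above are small helpers shared by the
 * SHOW_FUNCTION/STORE_FUNCTION macros below; each macro invocation generates
 * one show or store handler for a single sysfs tunable.
 */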
4604 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
4605 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
4606 {                                                                       \
4607         struct cfq_data *cfqd = e->elevator_data;                       \
4608         unsigned int __data = __VAR;                                    \
4609         if (__CONV)                                                     \
4610                 __data = jiffies_to_msecs(__data);                      \
4611         return cfq_var_show(__data, (page));                            \
4612 }
4613 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4614 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4615 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4616 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4617 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4618 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4619 SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4620 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4621 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4622 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4623 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4624 SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4625 #undef SHOW_FUNCTION
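
/*
 * For reference, SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)
 * above expands (roughly, with the constant __CONV test folded away) to:
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *
 *		__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 */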
4626
4627 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
4628 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4629 {                                                                       \
4630         struct cfq_data *cfqd = e->elevator_data;                       \
4631         unsigned int __data;                                            \
4632         int ret = cfq_var_store(&__data, (page), count);                \
4633         if (__data < (MIN))                                             \
4634                 __data = (MIN);                                         \
4635         else if (__data > (MAX))                                        \
4636                 __data = (MAX);                                         \
4637         if (__CONV)                                                     \
4638                 *(__PTR) = msecs_to_jiffies(__data);                    \
4639         else                                                            \
4640                 *(__PTR) = __data;                                      \
4641         return ret;                                                     \
4642 }
4643 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4644 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4645                 UINT_MAX, 1);
4646 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4647                 UINT_MAX, 1);
4648 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4649 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4650                 UINT_MAX, 0);
4651 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4652 STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4653 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4654 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4655 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4656                 UINT_MAX, 0);
4657 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4658 STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4659 #undef STORE_FUNCTION
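
/*
 * Note on the store path: values written via sysfs are silently clamped to
 * the [MIN, MAX] range given in each STORE_FUNCTION() invocation above, and
 * tunables generated with __CONV == 1 are accepted in milliseconds and stored
 * internally in jiffies via msecs_to_jiffies().
 */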
4660
4661 #define CFQ_ATTR(name) \
4662         __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4663
4664 static struct elv_fs_entry cfq_attrs[] = {
4665         CFQ_ATTR(quantum),
4666         CFQ_ATTR(fifo_expire_sync),
4667         CFQ_ATTR(fifo_expire_async),
4668         CFQ_ATTR(back_seek_max),
4669         CFQ_ATTR(back_seek_penalty),
4670         CFQ_ATTR(slice_sync),
4671         CFQ_ATTR(slice_async),
4672         CFQ_ATTR(slice_async_rq),
4673         CFQ_ATTR(slice_idle),
4674         CFQ_ATTR(group_idle),
4675         CFQ_ATTR(low_latency),
4676         CFQ_ATTR(target_latency),
4677         __ATTR_NULL
4678 };
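
/*
 * These attributes are exposed by the elevator core under
 * /sys/block/<dev>/queue/iosched/ while cfq is the active scheduler.
 * For example, on a hypothetical device "sda":
 *
 *	cat /sys/block/sda/queue/iosched/slice_idle
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * The millisecond-based tunables are converted to and from jiffies by the
 * show/store helpers above.
 */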
4679
4680 static struct elevator_type iosched_cfq = {
4681         .ops = {
4682                 .elevator_merge_fn =            cfq_merge,
4683                 .elevator_merged_fn =           cfq_merged_request,
4684                 .elevator_merge_req_fn =        cfq_merged_requests,
4685                 .elevator_allow_merge_fn =      cfq_allow_merge,
4686                 .elevator_bio_merged_fn =       cfq_bio_merged,
4687                 .elevator_dispatch_fn =         cfq_dispatch_requests,
4688                 .elevator_add_req_fn =          cfq_insert_request,
4689                 .elevator_activate_req_fn =     cfq_activate_request,
4690                 .elevator_deactivate_req_fn =   cfq_deactivate_request,
4691                 .elevator_completed_req_fn =    cfq_completed_request,
4692                 .elevator_former_req_fn =       elv_rb_former_request,
4693                 .elevator_latter_req_fn =       elv_rb_latter_request,
4694                 .elevator_init_icq_fn =         cfq_init_icq,
4695                 .elevator_exit_icq_fn =         cfq_exit_icq,
4696                 .elevator_set_req_fn =          cfq_set_request,
4697                 .elevator_put_req_fn =          cfq_put_request,
4698                 .elevator_may_queue_fn =        cfq_may_queue,
4699                 .elevator_init_fn =             cfq_init_queue,
4700                 .elevator_exit_fn =             cfq_exit_queue,
4701                 .elevator_registered_fn =       cfq_registered_queue,
4702         },
4703         .icq_size       =       sizeof(struct cfq_io_cq),
4704         .icq_align      =       __alignof__(struct cfq_io_cq),
4705         .elevator_attrs =       cfq_attrs,
4706         .elevator_name  =       "cfq",
4707         .elevator_owner =       THIS_MODULE,
4708 };
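
/*
 * The scheduler is registered under the name "cfq" via elv_register() in
 * cfq_init() below.  icq_size/icq_align tell the block layer how to size the
 * per-(io_context, queue) icq allocations so that a struct cfq_io_cq can be
 * embedded in each one.
 */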
4709
4710 #ifdef CONFIG_CFQ_GROUP_IOSCHED
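/*
 * blkcg policy hooks: the cpd_* callbacks manage per-blkcg policy data, while
 * the pd_* callbacks manage the per-(blkcg, request_queue) data that backs
 * each cfq_group.  dfl_cftypes and legacy_cftypes supply the cgroup control
 * files for the v2 (default) and v1 (legacy) hierarchies respectively.
 */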
4711 static struct blkcg_policy blkcg_policy_cfq = {
4712         .dfl_cftypes            = cfq_blkcg_files,
4713         .legacy_cftypes         = cfq_blkcg_legacy_files,
4714
4715         .cpd_alloc_fn           = cfq_cpd_alloc,
4716         .cpd_init_fn            = cfq_cpd_init,
4717         .cpd_free_fn            = cfq_cpd_free,
4718
4719         .pd_alloc_fn            = cfq_pd_alloc,
4720         .pd_init_fn             = cfq_pd_init,
4721         .pd_offline_fn          = cfq_pd_offline,
4722         .pd_free_fn             = cfq_pd_free,
4723         .pd_reset_stats_fn      = cfq_pd_reset_stats,
4724 };
4725 #endif
4726
4727 static int __init cfq_init(void)
4728 {
4729         int ret;
4730
4731         /*
4732          * could be 0 on HZ < 1000 setups
4733          */
4734         if (!cfq_slice_async)
4735                 cfq_slice_async = 1;
4736         if (!cfq_slice_idle)
4737                 cfq_slice_idle = 1;
4738
4739 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4740         if (!cfq_group_idle)
4741                 cfq_group_idle = 1;
4742
4743         ret = blkcg_policy_register(&blkcg_policy_cfq);
4744         if (ret)
4745                 return ret;
4746 #else
4747         cfq_group_idle = 0;
4748 #endif
4749
4750         ret = -ENOMEM;
4751         cfq_pool = KMEM_CACHE(cfq_queue, 0);
4752         if (!cfq_pool)
4753                 goto err_pol_unreg;
4754
4755         ret = elv_register(&iosched_cfq);
4756         if (ret)
4757                 goto err_free_pool;
4758
4759         return 0;
4760
4761 err_free_pool:
4762         kmem_cache_destroy(cfq_pool);
4763 err_pol_unreg:
4764 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4765         blkcg_policy_unregister(&blkcg_policy_cfq);
4766 #endif
4767         return ret;
4768 }
4769
4770 static void __exit cfq_exit(void)
4771 {
4772 #ifdef CONFIG_CFQ_GROUP_IOSCHED
4773         blkcg_policy_unregister(&blkcg_policy_cfq);
4774 #endif
4775         elv_unregister(&iosched_cfq);
4776         kmem_cache_destroy(cfq_pool);
4777 }
4778
4779 module_init(cfq_init);
4780 module_exit(cfq_exit);
4781
4782 MODULE_AUTHOR("Jens Axboe");
4783 MODULE_LICENSE("GPL");
4784 MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");