/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */
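
/*
 * These defaults are runtime-tunable through the sysfs attributes
 * registered at the bottom of this file, e.g. (path for illustration,
 * with <dev> standing in for a block device name):
 *
 *      echo 100 > /sys/block/<dev>/queue/iosched/read_expire
 *
 * read_expire and write_expire are shown and stored in milliseconds;
 * the conversion to and from jiffies happens in the show/store
 * functions below.
 */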

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)    list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)            (drq)->on_hash
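
/*
 * Worked example (illustrative numbers): a request starting at sector
 * 1000 with nr_sectors == 24 ends at sector 1024, so rq_hash_key() is
 * 1024. DL_HASH_BLOCK(1024) == 128 (sectors are grouped 8 per block),
 * and hash_long(128, 5) folds that into one of DL_HASH_ENTRIES == 32
 * buckets. Hashing by *end* sector is what makes a back merge lookup
 * (a bio that starts where a queued request ends) a single bucket scan.
 */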

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next request in sort order; read, write, or both may be NULL
         */
        struct deadline_rq *next_drq[2];
        struct list_head *hash;         /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};

/*
 * per-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct list_head hash;
        char on_hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};
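
/*
 * Each queued request is thus indexed three ways at once: by starting
 * sector in the per-direction rb tree (dispatch order), by ending
 * sector in the hash (back merge lookup), and by submission time in
 * the fifo (deadline enforcement).
 */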

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        drq->on_hash = 0;
        list_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        drq->on_hash = 1;
        list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain, so that subsequent back merge
 * lookups (which scan the chain from the head) find it quickly
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && drq->hash.prev != head) {
                list_del(&drq->hash);
                list_add(&drq->hash, head);
        }
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct list_head *entry, *next = hash_list->next;

        while ((entry = next) != hash_list) {
                struct deadline_rq *drq = list_entry_hash(entry);
                struct request *__rq = drq->request;

                next = entry->next;

                BUG_ON(!ON_HASH(drq));

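                /* lazily drop entries that are no longer merge candidates */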
                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}

/*
 * rb tree support functions
 */
#define RB_NONE         (2)
#define RB_EMPTY(root)  ((root)->rb_node == NULL)
#define ON_RB(node)     ((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)  ((node)->rb_color = RB_NONE)
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector
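
/*
 * ON_RB/RB_CLEAR reuse the rb_node color field as an "on tree" flag:
 * RB_NONE (2) is never a valid rb color, so a cleared node can be
 * distinguished from one that is linked into a tree.
 */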

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

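/*
 * insert drq into the rb tree; if another request already holds the same
 * start sector (an alias), move that request to the dispatch queue to
 * free the slot, then retry the insert.
 */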
static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        deadline_move_request(dd, __alias);
        goto retry;
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!ON_RB(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 * Callers must ensure the sort list is non-empty.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
        deadline_del_drq_hash(drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
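/*
 * In outline, the selection below works as follows:
 *
 *   1. if a batch is in progress and the cached next request continues
 *      it sequentially, keep batching (up to fifo_batch requests);
 *   2. otherwise pick a direction: reads win unless writes have already
 *      been passed over writes_starved times;
 *   3. within that direction, serve the head of the fifo if its
 *      deadline has expired, else continue from the cached next request
 *      in sort order, else sweep back to the lowest sector.
 */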
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return -ENOMEM;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return -ENOMEM;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return -ENOMEM;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                        mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return -ENOMEM;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_LIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        e->elevator_data = dd;
        return 0;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

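/*
 * allocate and initialize per-request data for rq; returns 0 on success,
 * nonzero when the mempool allocation fails (signalled back to the
 * elevator core as an allocation failure).
 */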
static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR(&drq->rb_node);
                drq->request = rq;

                INIT_LIST_HEAD(&drq->hash);
                drq->on_hash = 0;

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */
struct deadline_fs_entry {
        struct attribute attr;
        ssize_t (*show)(struct deadline_data *, char *);
        ssize_t (*store)(struct deadline_data *, const char *, size_t);
};

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct deadline_data *dd, char *page)             \
{                                                                       \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
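
/*
 * For reference, SHOW_FUNCTION(deadline_readexpire_show,
 * dd->fifo_expire[READ], 1) expands to roughly:
 *
 *      static ssize_t deadline_readexpire_show(struct deadline_data *dd,
 *                                              char *page)
 *      {
 *              int __data = dd->fifo_expire[READ];
 *              __data = jiffies_to_msecs(__data);      (__CONV == 1)
 *              return deadline_var_show(__data, page);
 *      }
 */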

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
{                                                                       \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

static struct deadline_fs_entry deadline_readexpire_entry = {
        .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_readexpire_show,
        .store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
        .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_writeexpire_show,
        .store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
        .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_writesstarved_show,
        .store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
        .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_frontmerges_show,
        .store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
        .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_fifobatch_show,
        .store = deadline_fifobatch_store,
};

static struct attribute *default_attrs[] = {
        &deadline_readexpire_entry.attr,
        &deadline_writeexpire_entry.attr,
        &deadline_writesstarved_entry.attr,
        &deadline_frontmerges_entry.attr,
        &deadline_fifobatch_entry.attr,
        NULL,
};

#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)

static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct deadline_fs_entry *entry = to_deadline(attr);

        if (!entry->show)
                return -EIO;

        return entry->show(e->elevator_data, page);
}

static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct deadline_fs_entry *entry = to_deadline(attr);

        if (!entry->store)
                return -EIO;

        return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops deadline_sysfs_ops = {
        .show   = deadline_attr_show,
        .store  = deadline_attr_store,
};

static struct kobj_type deadline_ktype = {
        .sysfs_ops      = &deadline_sysfs_ops,
        .default_attrs  = default_attrs,
};

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_ktype = &deadline_ktype,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");