1 /*
2  * blkfront.c
3  *
4  * XenLinux virtual block device driver.
5  *
6  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
7  * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
8  * Copyright (c) 2004, Christian Limpach
9  * Copyright (c) 2004, Andrew Warfield
10  * Copyright (c) 2005, Christopher Clark
11  * Copyright (c) 2005, XenSource Ltd
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License version 2
15  * as published by the Free Software Foundation; or, when distributed
16  * separately from the Linux kernel or incorporated into other
17  * software packages, subject to the following license:
18  *
19  * Permission is hereby granted, free of charge, to any person obtaining a copy
20  * of this source file (the "Software"), to deal in the Software without
21  * restriction, including without limitation the rights to use, copy, modify,
22  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
23  * and to permit persons to whom the Software is furnished to do so, subject to
24  * the following conditions:
25  *
26  * The above copyright notice and this permission notice shall be included in
27  * all copies or substantial portions of the Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
35  * IN THE SOFTWARE.
36  */
37
38 #include <linux/interrupt.h>
39 #include <linux/blkdev.h>
40 #include <linux/blk-mq.h>
41 #include <linux/hdreg.h>
42 #include <linux/cdrom.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/mutex.h>
46 #include <linux/scatterlist.h>
47 #include <linux/bitmap.h>
48 #include <linux/list.h>
49
50 #include <xen/xen.h>
51 #include <xen/xenbus.h>
52 #include <xen/grant_table.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56
57 #include <xen/interface/grant_table.h>
58 #include <xen/interface/io/blkif.h>
59 #include <xen/interface/io/protocols.h>
60
61 #include <asm/xen/hypervisor.h>
62
63 /*
64  * The minimal size of segment supported by the block framework is PAGE_SIZE.
65  * When Linux is using a different page size than Xen, it may not be possible
66  * to put all the data in a single segment.
67  * This can happen when the backend doesn't support indirect descriptors and
68  * therefore the maximum amount of data that a request can carry is
69  * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
70  *
71  * Note that we only support one extra request. So the Linux page size
72  * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
73  * 88KB.
74  */
75 #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
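/*
 * Example (taking the 44KB figure above, i.e. 11 segments of 4KB Xen pages):
 * with 64KB Linux pages XEN_PFN_PER_PAGE is 16, so a single Linux page may
 * need up to 16 grants and HAS_EXTRA_REQ is true; with 4KB Linux pages
 * XEN_PFN_PER_PAGE is 1 and no extra request is ever needed.
 */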
76
77 enum blkif_state {
78         BLKIF_STATE_DISCONNECTED,
79         BLKIF_STATE_CONNECTED,
80         BLKIF_STATE_SUSPENDED,
81 };
82
83 struct grant {
84         grant_ref_t gref;
85         struct page *page;
86         struct list_head node;
87 };
88
89 enum blk_req_status {
90         REQ_WAITING,
91         REQ_DONE,
92         REQ_ERROR,
93         REQ_EOPNOTSUPP,
94 };
95
96 struct blk_shadow {
97         struct blkif_request req;
98         struct request *request;
99         struct grant **grants_used;
100         struct grant **indirect_grants;
101         struct scatterlist *sg;
102         unsigned int num_sg;
103         enum blk_req_status status;
104
105         #define NO_ASSOCIATED_ID ~0UL
106         /*
107          * Id of the sibling if we ever need 2 requests when handling a
108          * block I/O request
109          */
110         unsigned long associated_id;
111 };
112
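/*
 * Tracks a bio that has been split into smaller bios (e.g. when resubmitting
 * I/O after migration to a backend with smaller limits); 'pending' counts the
 * outstanding fragments so the original bio is completed only once.
 */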
113 struct split_bio {
114         struct bio *bio;
115         atomic_t pending;
116 };
117
118 static DEFINE_MUTEX(blkfront_mutex);
119 static const struct block_device_operations xlvbd_block_fops;
120
121 /*
122  * Maximum number of segments in indirect requests. The actual value used by
123  * the frontend driver is the minimum of this value and the value provided
124  * by the backend driver.
125  */
126
127 static unsigned int xen_blkif_max_segments = 32;
128 module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
129                    S_IRUGO);
130 MODULE_PARM_DESC(max_indirect_segments,
131                  "Maximum number of segments in indirect requests (default is 32)");
132
133 static unsigned int xen_blkif_max_queues = 4;
134 module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
135 MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
136
137 /*
138  * Maximum order of pages to be used for the shared ring between front and
139  * backend, 4KB page granularity is used.
140  */
141 static unsigned int xen_blkif_max_ring_order;
142 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
143 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
144
145 #define BLK_RING_SIZE(info)     \
146         __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
147
148 #define BLK_MAX_RING_SIZE       \
149         __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
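/*
 * With a single 4KB ring page this evaluates to 32 ring (and shadow) entries;
 * each additional ring page roughly doubles that, up to XENBUS_MAX_RING_GRANTS
 * pages for BLK_MAX_RING_SIZE.
 */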
150
151 /*
152  * ring-ref%u with i = (-1UL) would take 11 characters, plus 'ring-ref' is 8,
153  * so 19 characters are enough. Define to 20 to stay consistent with the backend.
154  */
155 #define RINGREF_NAME_LEN (20)
156 /*
157  * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
158  */
159 #define QUEUE_NAME_LEN (17)
160
161 /*
162  *  Per-ring info.
163  *  Every blkfront device can be associated with one or more blkfront_ring_info,
164  *  depending on how many hardware queues/rings are to be used.
165  */
166 struct blkfront_ring_info {
167         /* Lock to protect data in every ring buffer. */
168         spinlock_t ring_lock;
169         struct blkif_front_ring ring;
170         unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
171         unsigned int evtchn, irq;
172         struct work_struct work;
173         struct gnttab_free_callback callback;
174         struct blk_shadow shadow[BLK_MAX_RING_SIZE];
175         struct list_head indirect_pages;
176         struct list_head grants;
177         unsigned int persistent_gnts_c;
178         unsigned long shadow_free;
179         struct blkfront_info *dev_info;
180 };
181
182 /*
183  * We have one of these per vbd, whether ide, scsi or 'other'.  They
184  * hang in private_data off the gendisk structure. We may end up
185  * putting all kinds of interesting stuff here :-)
186  */
187 struct blkfront_info
188 {
189         struct mutex mutex;
190         struct xenbus_device *xbdev;
191         struct gendisk *gd;
192         u16 sector_size;
193         unsigned int physical_sector_size;
194         int vdevice;
195         blkif_vdev_t handle;
196         enum blkif_state connected;
197         /* Number of pages per ring buffer. */
198         unsigned int nr_ring_pages;
199         struct request_queue *rq;
200         unsigned int feature_flush:1;
201         unsigned int feature_fua:1;
202         unsigned int feature_discard:1;
203         unsigned int feature_secdiscard:1;
204         unsigned int feature_persistent:1;
205         unsigned int discard_granularity;
206         unsigned int discard_alignment;
207         /* Number of 4KB segments handled */
208         unsigned int max_indirect_segments;
209         int is_ready;
210         struct blk_mq_tag_set tag_set;
211         struct blkfront_ring_info *rinfo;
212         unsigned int nr_rings;
213         /* Save incomplete reqs and bios for migration. */
214         struct list_head requests;
215         struct bio_list bio_list;
216 };
217
218 static unsigned int nr_minors;
219 static unsigned long *minors;
220 static DEFINE_SPINLOCK(minor_lock);
221
222 #define GRANT_INVALID_REF       0
223
224 #define PARTS_PER_DISK          16
225 #define PARTS_PER_EXT_DISK      256
226
227 #define BLKIF_MAJOR(dev) ((dev)>>8)
228 #define BLKIF_MINOR(dev) ((dev) & 0xff)
229
230 #define EXT_SHIFT 28
231 #define EXTENDED (1<<EXT_SHIFT)
232 #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
233 #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
234 #define EMULATED_HD_DISK_MINOR_OFFSET (0)
235 #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
236 #define EMULATED_SD_DISK_MINOR_OFFSET (0)
237 #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
238
239 #define DEV_NAME        "xvd"   /* name in /dev */
240
241 /*
242  * Grants are always the same size as a Xen page (i.e. 4KB).
243  * A physical segment is always the same size as a Linux page.
244  * Number of grants per physical segment
245  */
246 #define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
247
248 #define GRANTS_PER_INDIRECT_FRAME \
249         (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
250
251 #define PSEGS_PER_INDIRECT_FRAME        \
252         (GRANTS_PER_INDIRECT_FRAME / GRANTS_PER_PSEG)
253
254 #define INDIRECT_GREFS(_grants)         \
255         DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
256
257 #define GREFS(_psegs)   ((_psegs) * GRANTS_PER_PSEG)
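/*
 * For example, on a 4KB-page kernel GRANTS_PER_PSEG is 1 and GREFS(n) == n,
 * while with 64KB Linux pages GRANTS_PER_PSEG is 16 and every physical
 * segment needs 16 grant references.
 */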
258
259 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
260 static void blkfront_gather_backend_features(struct blkfront_info *info);
261
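/*
 * The shadow array doubles as a free list: shadow_free holds the index of the
 * first free entry and each free entry stores the index of the next one in
 * req.u.rw.id, so allocating and releasing request ids is O(1).
 */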
262 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
263 {
264         unsigned long free = rinfo->shadow_free;
265
266         BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
267         rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
268         rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
269         return free;
270 }
271
272 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
273                               unsigned long id)
274 {
275         if (rinfo->shadow[id].req.u.rw.id != id)
276                 return -EINVAL;
277         if (rinfo->shadow[id].request == NULL)
278                 return -EINVAL;
279         rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
280         rinfo->shadow[id].request = NULL;
281         rinfo->shadow_free = id;
282         return 0;
283 }
284
285 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
286 {
287         struct blkfront_info *info = rinfo->dev_info;
288         struct page *granted_page;
289         struct grant *gnt_list_entry, *n;
290         int i = 0;
291
292         while (i < num) {
293                 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
294                 if (!gnt_list_entry)
295                         goto out_of_memory;
296
297                 if (info->feature_persistent) {
298                         granted_page = alloc_page(GFP_NOIO);
299                         if (!granted_page) {
300                                 kfree(gnt_list_entry);
301                                 goto out_of_memory;
302                         }
303                         gnt_list_entry->page = granted_page;
304                 }
305
306                 gnt_list_entry->gref = GRANT_INVALID_REF;
307                 list_add(&gnt_list_entry->node, &rinfo->grants);
308                 i++;
309         }
310
311         return 0;
312
313 out_of_memory:
314         list_for_each_entry_safe(gnt_list_entry, n,
315                                  &rinfo->grants, node) {
316                 list_del(&gnt_list_entry->node);
317                 if (info->feature_persistent)
318                         __free_page(gnt_list_entry->page);
319                 kfree(gnt_list_entry);
320                 i--;
321         }
322         BUG_ON(i != 0);
323         return -ENOMEM;
324 }
325
326 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
327 {
328         struct grant *gnt_list_entry;
329
330         BUG_ON(list_empty(&rinfo->grants));
331         gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
332                                           node);
333         list_del(&gnt_list_entry->node);
334
335         if (gnt_list_entry->gref != GRANT_INVALID_REF)
336                 rinfo->persistent_gnts_c--;
337
338         return gnt_list_entry;
339 }
340
341 static inline void grant_foreign_access(const struct grant *gnt_list_entry,
342                                         const struct blkfront_info *info)
343 {
344         gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
345                                                  info->xbdev->otherend_id,
346                                                  gnt_list_entry->page,
347                                                  0);
348 }
349
350 static struct grant *get_grant(grant_ref_t *gref_head,
351                                unsigned long gfn,
352                                struct blkfront_ring_info *rinfo)
353 {
354         struct grant *gnt_list_entry = get_free_grant(rinfo);
355         struct blkfront_info *info = rinfo->dev_info;
356
357         if (gnt_list_entry->gref != GRANT_INVALID_REF)
358                 return gnt_list_entry;
359
360         /* Assign a gref to this page */
361         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
362         BUG_ON(gnt_list_entry->gref == -ENOSPC);
363         if (info->feature_persistent)
364                 grant_foreign_access(gnt_list_entry, info);
365         else {
366                 /* Grant access to the GFN passed by the caller */
367                 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
368                                                 info->xbdev->otherend_id,
369                                                 gfn, 0);
370         }
371
372         return gnt_list_entry;
373 }
374
375 static struct grant *get_indirect_grant(grant_ref_t *gref_head,
376                                         struct blkfront_ring_info *rinfo)
377 {
378         struct grant *gnt_list_entry = get_free_grant(rinfo);
379         struct blkfront_info *info = rinfo->dev_info;
380
381         if (gnt_list_entry->gref != GRANT_INVALID_REF)
382                 return gnt_list_entry;
383
384         /* Assign a gref to this page */
385         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
386         BUG_ON(gnt_list_entry->gref == -ENOSPC);
387         if (!info->feature_persistent) {
388                 struct page *indirect_page;
389
390                 /* Fetch a pre-allocated page to use for indirect grefs */
391                 BUG_ON(list_empty(&rinfo->indirect_pages));
392                 indirect_page = list_first_entry(&rinfo->indirect_pages,
393                                                  struct page, lru);
394                 list_del(&indirect_page->lru);
395                 gnt_list_entry->page = indirect_page;
396         }
397         grant_foreign_access(gnt_list_entry, info);
398
399         return gnt_list_entry;
400 }
401
402 static const char *op_name(int op)
403 {
404         static const char *const names[] = {
405                 [BLKIF_OP_READ] = "read",
406                 [BLKIF_OP_WRITE] = "write",
407                 [BLKIF_OP_WRITE_BARRIER] = "barrier",
408                 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
409                 [BLKIF_OP_DISCARD] = "discard" };
410
411         if (op < 0 || op >= ARRAY_SIZE(names))
412                 return "unknown";
413
414         if (!names[op])
415                 return "reserved";
416
417         return names[op];
418 }
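
/*
 * Reserve a contiguous range of minors in the global bitmap.  The bitmap is
 * grown outside minor_lock and swapped in under the lock, re-checking
 * nr_minors so that concurrent growers do not clobber each other.
 */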
419 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
420 {
421         unsigned int end = minor + nr;
422         int rc;
423
424         if (end > nr_minors) {
425                 unsigned long *bitmap, *old;
426
427                 bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
428                                  GFP_KERNEL);
429                 if (bitmap == NULL)
430                         return -ENOMEM;
431
432                 spin_lock(&minor_lock);
433                 if (end > nr_minors) {
434                         old = minors;
435                         memcpy(bitmap, minors,
436                                BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
437                         minors = bitmap;
438                         nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
439                 } else
440                         old = bitmap;
441                 spin_unlock(&minor_lock);
442                 kfree(old);
443         }
444
445         spin_lock(&minor_lock);
446         if (find_next_bit(minors, end, minor) >= end) {
447                 bitmap_set(minors, minor, nr);
448                 rc = 0;
449         } else
450                 rc = -EBUSY;
451         spin_unlock(&minor_lock);
452
453         return rc;
454 }
455
456 static void xlbd_release_minors(unsigned int minor, unsigned int nr)
457 {
458         unsigned int end = minor + nr;
459
460         BUG_ON(end > nr_minors);
461         spin_lock(&minor_lock);
462         bitmap_clear(minors,  minor, nr);
463         spin_unlock(&minor_lock);
464 }
465
466 static void blkif_restart_queue_callback(void *arg)
467 {
468         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
469         schedule_work(&rinfo->work);
470 }
471
472 static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
473 {
474         /* We don't have real geometry info, but let's at least return
475            values consistent with the size of the device */
476         sector_t nsect = get_capacity(bd->bd_disk);
477         sector_t cylinders = nsect;
478
479         hg->heads = 0xff;
480         hg->sectors = 0x3f;
481         sector_div(cylinders, hg->heads * hg->sectors);
482         hg->cylinders = cylinders;
483         if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
484                 hg->cylinders = 0xffff;
485         return 0;
486 }
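/*
 * For example, a 16GiB disk (33554432 sectors) is reported as 255 heads,
 * 63 sectors per track and 33554432 / (255 * 63) = 2088 cylinders.
 */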
487
488 static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
489                        unsigned command, unsigned long argument)
490 {
491         struct blkfront_info *info = bdev->bd_disk->private_data;
492         int i;
493
494         dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
495                 command, (long)argument);
496
497         switch (command) {
498         case CDROMMULTISESSION:
499                 dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
500                 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
501                         if (put_user(0, (char __user *)(argument + i)))
502                                 return -EFAULT;
503                 return 0;
504
505         case CDROM_GET_CAPABILITY: {
506                 struct gendisk *gd = info->gd;
507                 if (gd->flags & GENHD_FL_CD)
508                         return 0;
509                 return -EINVAL;
510         }
511
512         default:
513                 /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
514                   command);*/
515                 return -EINVAL; /* same return as native Linux */
516         }
517
518         return 0;
519 }
520
521 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
522                                             struct request *req,
523                                             struct blkif_request **ring_req)
524 {
525         unsigned long id;
526
527         *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
528         rinfo->ring.req_prod_pvt++;
529
530         id = get_id_from_freelist(rinfo);
531         rinfo->shadow[id].request = req;
532         rinfo->shadow[id].status = REQ_WAITING;
533         rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
534
535         (*ring_req)->u.rw.id = id;
536
537         return id;
538 }
539
540 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
541 {
542         struct blkfront_info *info = rinfo->dev_info;
543         struct blkif_request *ring_req;
544         unsigned long id;
545
546         /* Fill out a communications ring structure. */
547         id = blkif_ring_get_request(rinfo, req, &ring_req);
548
549         ring_req->operation = BLKIF_OP_DISCARD;
550         ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
551         ring_req->u.discard.id = id;
552         ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
553         if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
554                 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
555         else
556                 ring_req->u.discard.flag = 0;
557
558         /* Keep a private copy so we can reissue requests when recovering. */
559         rinfo->shadow[id].req = *ring_req;
560
561         return 0;
562 }
563
564 struct setup_rw_req {
565         unsigned int grant_idx;
566         struct blkif_request_segment *segments;
567         struct blkfront_ring_info *rinfo;
568         struct blkif_request *ring_req;
569         grant_ref_t gref_head;
570         unsigned int id;
571         /* Only used when persistent grant is used and it's a read request */
572         bool need_copy;
573         unsigned int bvec_off;
574         char *bvec_data;
575
576         bool require_extra_req;
577         struct blkif_request *extra_ring_req;
578 };
579
580 static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
581                                      unsigned int len, void *data)
582 {
583         struct setup_rw_req *setup = data;
584         int n, ref;
585         struct grant *gnt_list_entry;
586         unsigned int fsect, lsect;
587         /* Convenient aliases */
588         unsigned int grant_idx = setup->grant_idx;
589         struct blkif_request *ring_req = setup->ring_req;
590         struct blkfront_ring_info *rinfo = setup->rinfo;
591         /*
592          * We always use the shadow of the first request to store the list
593          * of grants associated with the block I/O request. This makes the
594          * completion easier to handle even if the block I/O request is
595          * split.
596          */
597         struct blk_shadow *shadow = &rinfo->shadow[setup->id];
598
599         if (unlikely(setup->require_extra_req &&
600                      grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
601                 /*
602                  * We are using the second request; set up grant_idx
603                  * to be the index into its segment array.
604                  */
605                 grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
606                 ring_req = setup->extra_ring_req;
607         }
608
609         if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
610             (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
611                 if (setup->segments)
612                         kunmap_atomic(setup->segments);
613
614                 n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
615                 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
616                 shadow->indirect_grants[n] = gnt_list_entry;
617                 setup->segments = kmap_atomic(gnt_list_entry->page);
618                 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
619         }
620
621         gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
622         ref = gnt_list_entry->gref;
623         /*
624          * All the grants are stored in the shadow of the first
625          * request. Therefore we have to use the global index.
626          */
627         shadow->grants_used[setup->grant_idx] = gnt_list_entry;
628
629         if (setup->need_copy) {
630                 void *shared_data;
631
632                 shared_data = kmap_atomic(gnt_list_entry->page);
633                 /*
634                  * this does not wipe data stored outside the
635                  * range sg->offset..sg->offset+sg->length.
636                  * Therefore, blkback *could* see data from
637                  * previous requests. This is OK as long as
638                  * persistent grants are shared with just one
639                  * domain. It may need refactoring if this
640                  * changes
641                  */
642                 memcpy(shared_data + offset,
643                        setup->bvec_data + setup->bvec_off,
644                        len);
645
646                 kunmap_atomic(shared_data);
647                 setup->bvec_off += len;
648         }
649
650         fsect = offset >> 9;
651         lsect = fsect + (len >> 9) - 1;
652         if (ring_req->operation != BLKIF_OP_INDIRECT) {
653                 ring_req->u.rw.seg[grant_idx] =
654                         (struct blkif_request_segment) {
655                                 .gref       = ref,
656                                 .first_sect = fsect,
657                                 .last_sect  = lsect };
658         } else {
659                 setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
660                         (struct blkif_request_segment) {
661                                 .gref       = ref,
662                                 .first_sect = fsect,
663                                 .last_sect  = lsect };
664         }
665
666         (setup->grant_idx)++;
667 }
668
669 static void blkif_setup_extra_req(struct blkif_request *first,
670                                   struct blkif_request *second)
671 {
672         uint16_t nr_segments = first->u.rw.nr_segments;
673
674         /*
675          * The second request is only present when the first request uses
676          * all its segments. It's always the continuation of the first one.
677          */
678         first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
679
680         second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
681         second->u.rw.sector_number = first->u.rw.sector_number +
682                 (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
683
684         second->u.rw.handle = first->u.rw.handle;
685         second->operation = first->operation;
686 }
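/*
 * For example, with the usual 11-segment BLKIF_MAX_SEGMENTS_PER_REQUEST a
 * request carrying 16 grants (one 64KB Linux page backed by 4KB Xen pages)
 * is split 11 + 5, the second ring slot starting
 * 11 * XEN_PAGE_SIZE / 512 = 88 sectors after the first.
 */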
687
688 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
689 {
690         struct blkfront_info *info = rinfo->dev_info;
691         struct blkif_request *ring_req, *extra_ring_req = NULL;
692         unsigned long id, extra_id = NO_ASSOCIATED_ID;
693         bool require_extra_req = false;
694         int i;
695         struct setup_rw_req setup = {
696                 .grant_idx = 0,
697                 .segments = NULL,
698                 .rinfo = rinfo,
699                 .need_copy = rq_data_dir(req) && info->feature_persistent,
700         };
701
702         /*
703          * Used to store if we are able to queue the request by just using
704          * existing persistent grants, or if we have to get new grants,
705          * as there are not sufficiently many free.
706          */
707         struct scatterlist *sg;
708         int num_sg, max_grefs, num_grant;
709
710         max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
711         if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
712                 /*
713                  * If we are using indirect segments we need to account
714                  * for the indirect grefs used in the request.
715                  */
716                 max_grefs += INDIRECT_GREFS(max_grefs);
717
718         /*
719          * We have to reserve 'max_grefs' grants because persistent
720          * grants are shared by all rings.
721          */
722         if (max_grefs > 0)
723                 if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
724                         gnttab_request_free_callback(
725                                 &rinfo->callback,
726                                 blkif_restart_queue_callback,
727                                 rinfo,
728                                 max_grefs);
729                         return 1;
730                 }
731
732         /* Fill out a communications ring structure. */
733         id = blkif_ring_get_request(rinfo, req, &ring_req);
734
735         num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
736         num_grant = 0;
737         /* Calculate the number of grants used */
738         for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
739                num_grant += gnttab_count_grant(sg->offset, sg->length);
740
741         require_extra_req = info->max_indirect_segments == 0 &&
742                 num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
743         BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
744
745         rinfo->shadow[id].num_sg = num_sg;
746         if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
747             likely(!require_extra_req)) {
748                 /*
749                  * The indirect operation can only be a BLKIF_OP_READ or
750                  * BLKIF_OP_WRITE
751                  */
752                 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
753                 ring_req->operation = BLKIF_OP_INDIRECT;
754                 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
755                         BLKIF_OP_WRITE : BLKIF_OP_READ;
756                 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
757                 ring_req->u.indirect.handle = info->handle;
758                 ring_req->u.indirect.nr_segments = num_grant;
759         } else {
760                 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
761                 ring_req->u.rw.handle = info->handle;
762                 ring_req->operation = rq_data_dir(req) ?
763                         BLKIF_OP_WRITE : BLKIF_OP_READ;
764                 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
765                         /*
766                          * Ideally we can do an unordered flush-to-disk.
767                          * In case the backend only supports barriers, use that.
768                          * A barrier request is a superset of FUA, so we can
769                          * implement it the same way.  (It's also a FLUSH+FUA,
770                          * since it is guaranteed ordered WRT previous writes.)
771                          */
772                         if (info->feature_flush && info->feature_fua)
773                                 ring_req->operation =
774                                         BLKIF_OP_WRITE_BARRIER;
775                         else if (info->feature_flush)
776                                 ring_req->operation =
777                                         BLKIF_OP_FLUSH_DISKCACHE;
778                         else
779                                 ring_req->operation = 0;
780                 }
781                 ring_req->u.rw.nr_segments = num_grant;
782                 if (unlikely(require_extra_req)) {
783                         extra_id = blkif_ring_get_request(rinfo, req,
784                                                           &extra_ring_req);
785                         /*
786                          * Only the first request contains the scatter-gather
787                          * list.
788                          */
789                         rinfo->shadow[extra_id].num_sg = 0;
790
791                         blkif_setup_extra_req(ring_req, extra_ring_req);
792
793                         /* Link the 2 requests together */
794                         rinfo->shadow[extra_id].associated_id = id;
795                         rinfo->shadow[id].associated_id = extra_id;
796                 }
797         }
798
799         setup.ring_req = ring_req;
800         setup.id = id;
801
802         setup.require_extra_req = require_extra_req;
803         if (unlikely(require_extra_req))
804                 setup.extra_ring_req = extra_ring_req;
805
806         for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
807                 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
808
809                 if (setup.need_copy) {
810                         setup.bvec_off = sg->offset;
811                         setup.bvec_data = kmap_atomic(sg_page(sg));
812                 }
813
814                 gnttab_foreach_grant_in_range(sg_page(sg),
815                                               sg->offset,
816                                               sg->length,
817                                               blkif_setup_rw_req_grant,
818                                               &setup);
819
820                 if (setup.need_copy)
821                         kunmap_atomic(setup.bvec_data);
822         }
823         if (setup.segments)
824                 kunmap_atomic(setup.segments);
825
826         /* Keep a private copy so we can reissue requests when recovering. */
827         rinfo->shadow[id].req = *ring_req;
828         if (unlikely(require_extra_req))
829                 rinfo->shadow[extra_id].req = *extra_ring_req;
830
831         if (max_grefs > 0)
832                 gnttab_free_grant_references(setup.gref_head);
833
834         return 0;
835 }
836
837 /*
838  * Generate a Xen blkfront IO request from a blk layer request.  Reads
839  * and writes are handled as expected.
840  *
841  * @req: a request struct
842  */
843 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
844 {
845         if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
846                 return 1;
847
848         if (unlikely(req_op(req) == REQ_OP_DISCARD ||
849                      req_op(req) == REQ_OP_SECURE_ERASE))
850                 return blkif_queue_discard_req(req, rinfo);
851         else
852                 return blkif_queue_rw_req(req, rinfo);
853 }
854
855 static inline void flush_requests(struct blkfront_ring_info *rinfo)
856 {
857         int notify;
858
859         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
860
861         if (notify)
862                 notify_remote_via_irq(rinfo->irq);
863 }
864
865 static inline bool blkif_request_flush_invalid(struct request *req,
866                                                struct blkfront_info *info)
867 {
868         return (blk_rq_is_passthrough(req) ||
869                 ((req_op(req) == REQ_OP_FLUSH) &&
870                  !info->feature_flush) ||
871                 ((req->cmd_flags & REQ_FUA) &&
872                  !info->feature_fua));
873 }
874
875 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
876                           const struct blk_mq_queue_data *qd)
877 {
878         unsigned long flags;
879         int qid = hctx->queue_num;
880         struct blkfront_info *info = hctx->queue->queuedata;
881         struct blkfront_ring_info *rinfo = NULL;
882
883         BUG_ON(info->nr_rings <= qid);
884         rinfo = &info->rinfo[qid];
885         blk_mq_start_request(qd->rq);
886         spin_lock_irqsave(&rinfo->ring_lock, flags);
887         if (RING_FULL(&rinfo->ring))
888                 goto out_busy;
889
890         if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
891                 goto out_err;
892
893         if (blkif_queue_request(qd->rq, rinfo))
894                 goto out_busy;
895
896         flush_requests(rinfo);
897         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
898         return BLK_MQ_RQ_QUEUE_OK;
899
900 out_err:
901         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
902         return BLK_MQ_RQ_QUEUE_ERROR;
903
904 out_busy:
905         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
906         blk_mq_stop_hw_queue(hctx);
907         return BLK_MQ_RQ_QUEUE_BUSY;
908 }
909
910 static struct blk_mq_ops blkfront_mq_ops = {
911         .queue_rq = blkif_queue_rq,
912 };
913
914 static void blkif_set_queue_limits(struct blkfront_info *info)
915 {
916         struct request_queue *rq = info->rq;
917         struct gendisk *gd = info->gd;
918         unsigned int segments = info->max_indirect_segments ? :
919                                 BLKIF_MAX_SEGMENTS_PER_REQUEST;
920
921         queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
922
923         if (info->feature_discard) {
924                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
925                 blk_queue_max_discard_sectors(rq, get_capacity(gd));
926                 rq->limits.discard_granularity = info->discard_granularity;
927                 rq->limits.discard_alignment = info->discard_alignment;
928                 if (info->feature_secdiscard)
929                         queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
930         }
931
932         /* Hard sector size and max sectors impersonate the equiv. hardware. */
933         blk_queue_logical_block_size(rq, info->sector_size);
934         blk_queue_physical_block_size(rq, info->physical_sector_size);
935         blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
936
937         /* Each segment in a request is up to an aligned page in size. */
938         blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
939         blk_queue_max_segment_size(rq, PAGE_SIZE);
940
941         /* Ensure a merged request will fit in a single I/O ring slot. */
942         blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
943
944         /* Make sure buffer addresses are sector-aligned. */
945         blk_queue_dma_alignment(rq, 511);
946
947         /* Make sure we don't use bounce buffers. */
948         blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
949 }
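/*
 * With the default of 32 indirect segments and 4KB pages this allows up to
 * 32 * XEN_PAGE_SIZE / 512 = 256 sectors (128KB) per request; without
 * indirect descriptors the limit drops to 11 segments, i.e. 44KB.
 */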
950
951 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
952                                 unsigned int physical_sector_size)
953 {
954         struct request_queue *rq;
955         struct blkfront_info *info = gd->private_data;
956
957         memset(&info->tag_set, 0, sizeof(info->tag_set));
958         info->tag_set.ops = &blkfront_mq_ops;
959         info->tag_set.nr_hw_queues = info->nr_rings;
960         if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
961                 /*
962                  * When indirect descriptors are not supported, the I/O request
963                  * will be split across multiple requests in the ring.
964                  * To avoid problems when sending the requests, divide the
965                  * depth of the queue by 2.
966                  */
967                 info->tag_set.queue_depth =  BLK_RING_SIZE(info) / 2;
968         } else
969                 info->tag_set.queue_depth = BLK_RING_SIZE(info);
970         info->tag_set.numa_node = NUMA_NO_NODE;
971         info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
972         info->tag_set.cmd_size = 0;
973         info->tag_set.driver_data = info;
974
975         if (blk_mq_alloc_tag_set(&info->tag_set))
976                 return -EINVAL;
977         rq = blk_mq_init_queue(&info->tag_set);
978         if (IS_ERR(rq)) {
979                 blk_mq_free_tag_set(&info->tag_set);
980                 return PTR_ERR(rq);
981         }
982
983         rq->queuedata = info;
984         info->rq = gd->queue = rq;
985         info->gd = gd;
986         info->sector_size = sector_size;
987         info->physical_sector_size = physical_sector_size;
988         blkif_set_queue_limits(info);
989
990         return 0;
991 }
992
993 static const char *flush_info(struct blkfront_info *info)
994 {
995         if (info->feature_flush && info->feature_fua)
996                 return "barrier: enabled;";
997         else if (info->feature_flush)
998                 return "flush diskcache: enabled;";
999         else
1000                 return "barrier or flush: disabled;";
1001 }
1002
1003 static void xlvbd_flush(struct blkfront_info *info)
1004 {
1005         blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
1006                               info->feature_fua ? true : false);
1007         pr_info("blkfront: %s: %s %s %s %s %s\n",
1008                 info->gd->disk_name, flush_info(info),
1009                 "persistent grants:", info->feature_persistent ?
1010                 "enabled;" : "disabled;", "indirect descriptors:",
1011                 info->max_indirect_segments ? "enabled;" : "disabled;");
1012 }
1013
1014 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
1015 {
1016         int major;
1017         major = BLKIF_MAJOR(vdevice);
1018         *minor = BLKIF_MINOR(vdevice);
1019         switch (major) {
1020                 case XEN_IDE0_MAJOR:
1021                         *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
1022                         *minor = ((*minor / 64) * PARTS_PER_DISK) +
1023                                 EMULATED_HD_DISK_MINOR_OFFSET;
1024                         break;
1025                 case XEN_IDE1_MAJOR:
1026                         *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1027                         *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1028                                 EMULATED_HD_DISK_MINOR_OFFSET;
1029                         break;
1030                 case XEN_SCSI_DISK0_MAJOR:
1031                         *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1032                         *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1033                         break;
1034                 case XEN_SCSI_DISK1_MAJOR:
1035                 case XEN_SCSI_DISK2_MAJOR:
1036                 case XEN_SCSI_DISK3_MAJOR:
1037                 case XEN_SCSI_DISK4_MAJOR:
1038                 case XEN_SCSI_DISK5_MAJOR:
1039                 case XEN_SCSI_DISK6_MAJOR:
1040                 case XEN_SCSI_DISK7_MAJOR:
1041                         *offset = (*minor / PARTS_PER_DISK) + 
1042                                 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1043                                 EMULATED_SD_DISK_NAME_OFFSET;
1044                         *minor = *minor +
1045                                 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1046                                 EMULATED_SD_DISK_MINOR_OFFSET;
1047                         break;
1048                 case XEN_SCSI_DISK8_MAJOR:
1049                 case XEN_SCSI_DISK9_MAJOR:
1050                 case XEN_SCSI_DISK10_MAJOR:
1051                 case XEN_SCSI_DISK11_MAJOR:
1052                 case XEN_SCSI_DISK12_MAJOR:
1053                 case XEN_SCSI_DISK13_MAJOR:
1054                 case XEN_SCSI_DISK14_MAJOR:
1055                 case XEN_SCSI_DISK15_MAJOR:
1056                         *offset = (*minor / PARTS_PER_DISK) + 
1057                                 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1058                                 EMULATED_SD_DISK_NAME_OFFSET;
1059                         *minor = *minor +
1060                                 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1061                                 EMULATED_SD_DISK_MINOR_OFFSET;
1062                         break;
1063                 case XENVBD_MAJOR:
1064                         *offset = *minor / PARTS_PER_DISK;
1065                         break;
1066                 default:
1067                         printk(KERN_WARNING "blkfront: your disk configuration is "
1068                                         "incorrect, please use an xvd device instead\n");
1069                         return -ENODEV;
1070         }
1071         return 0;
1072 }
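/*
 * Example: an emulated hdb (XEN_IDE0_MAJOR, minor 64) translates to
 * *offset = 1 and *minor = 16, i.e. the disk shows up as xvdb with
 * PARTS_PER_DISK minors reserved for its partitions.
 */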
1073
1074 static char *encode_disk_name(char *ptr, unsigned int n)
1075 {
1076         if (n >= 26)
1077                 ptr = encode_disk_name(ptr, n / 26 - 1);
1078         *ptr = 'a' + n % 26;
1079         return ptr + 1;
1080 }
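/*
 * encode_disk_name(ptr, 0) yields "a" (xvda), 25 yields "z" and 26 wraps
 * to "aa" (xvdaa), matching the usual disk-name sequence.
 */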
1081
1082 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1083                                struct blkfront_info *info,
1084                                u16 vdisk_info, u16 sector_size,
1085                                unsigned int physical_sector_size)
1086 {
1087         struct gendisk *gd;
1088         int nr_minors = 1;
1089         int err;
1090         unsigned int offset;
1091         int minor;
1092         int nr_parts;
1093         char *ptr;
1094
1095         BUG_ON(info->gd != NULL);
1096         BUG_ON(info->rq != NULL);
1097
1098         if ((info->vdevice>>EXT_SHIFT) > 1) {
1099                 /* this is above the extended range; something is wrong */
1100                 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1101                 return -ENODEV;
1102         }
1103
1104         if (!VDEV_IS_EXTENDED(info->vdevice)) {
1105                 err = xen_translate_vdev(info->vdevice, &minor, &offset);
1106                 if (err)
1107                         return err;             
1108                 nr_parts = PARTS_PER_DISK;
1109         } else {
1110                 minor = BLKIF_MINOR_EXT(info->vdevice);
1111                 nr_parts = PARTS_PER_EXT_DISK;
1112                 offset = minor / nr_parts;
1113                 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
1114                         printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
1115                                         "emulated IDE disks,\n\t choose an xvd device name "
1116                                         "from xvde on\n", info->vdevice);
1117         }
1118         if (minor >> MINORBITS) {
1119                 pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1120                         info->vdevice, minor);
1121                 return -ENODEV;
1122         }
1123
1124         if ((minor % nr_parts) == 0)
1125                 nr_minors = nr_parts;
1126
1127         err = xlbd_reserve_minors(minor, nr_minors);
1128         if (err)
1129                 goto out;
1130         err = -ENODEV;
1131
1132         gd = alloc_disk(nr_minors);
1133         if (gd == NULL)
1134                 goto release;
1135
1136         strcpy(gd->disk_name, DEV_NAME);
1137         ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1138         BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1139         if (nr_minors > 1)
1140                 *ptr = 0;
1141         else
1142                 snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1143                          "%d", minor & (nr_parts - 1));
1144
1145         gd->major = XENVBD_MAJOR;
1146         gd->first_minor = minor;
1147         gd->fops = &xlvbd_block_fops;
1148         gd->private_data = info;
1149         set_capacity(gd, capacity);
1150
1151         if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
1152                 del_gendisk(gd);
1153                 goto release;
1154         }
1155
1156         xlvbd_flush(info);
1157
1158         if (vdisk_info & VDISK_READONLY)
1159                 set_disk_ro(gd, 1);
1160
1161         if (vdisk_info & VDISK_REMOVABLE)
1162                 gd->flags |= GENHD_FL_REMOVABLE;
1163
1164         if (vdisk_info & VDISK_CDROM)
1165                 gd->flags |= GENHD_FL_CD;
1166
1167         return 0;
1168
1169  release:
1170         xlbd_release_minors(minor, nr_minors);
1171  out:
1172         return err;
1173 }
1174
1175 static void xlvbd_release_gendisk(struct blkfront_info *info)
1176 {
1177         unsigned int minor, nr_minors, i;
1178
1179         if (info->rq == NULL)
1180                 return;
1181
1182         /* No more blkif_request(). */
1183         blk_mq_stop_hw_queues(info->rq);
1184
1185         for (i = 0; i < info->nr_rings; i++) {
1186                 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1187
1188                 /* No more gnttab callback work. */
1189                 gnttab_cancel_free_callback(&rinfo->callback);
1190
1191                 /* Flush gnttab callback work. Must be done with no locks held. */
1192                 flush_work(&rinfo->work);
1193         }
1194
1195         del_gendisk(info->gd);
1196
1197         minor = info->gd->first_minor;
1198         nr_minors = info->gd->minors;
1199         xlbd_release_minors(minor, nr_minors);
1200
1201         blk_cleanup_queue(info->rq);
1202         blk_mq_free_tag_set(&info->tag_set);
1203         info->rq = NULL;
1204
1205         put_disk(info->gd);
1206         info->gd = NULL;
1207 }
1208
1209 /* Already hold rinfo->ring_lock. */
1210 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1211 {
1212         if (!RING_FULL(&rinfo->ring))
1213                 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1214 }
1215
1216 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1217 {
1218         unsigned long flags;
1219
1220         spin_lock_irqsave(&rinfo->ring_lock, flags);
1221         kick_pending_request_queues_locked(rinfo);
1222         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1223 }
1224
1225 static void blkif_restart_queue(struct work_struct *work)
1226 {
1227         struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1228
1229         if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1230                 kick_pending_request_queues(rinfo);
1231 }
1232
1233 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1234 {
1235         struct grant *persistent_gnt, *n;
1236         struct blkfront_info *info = rinfo->dev_info;
1237         int i, j, segs;
1238
1239         /*
1240          * Remove indirect pages; this only happens when using indirect
1241          * descriptors but not persistent grants.
1242          */
1243         if (!list_empty(&rinfo->indirect_pages)) {
1244                 struct page *indirect_page, *n;
1245
1246                 BUG_ON(info->feature_persistent);
1247                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1248                         list_del(&indirect_page->lru);
1249                         __free_page(indirect_page);
1250                 }
1251         }
1252
1253         /* Remove all persistent grants. */
1254         if (!list_empty(&rinfo->grants)) {
1255                 list_for_each_entry_safe(persistent_gnt, n,
1256                                          &rinfo->grants, node) {
1257                         list_del(&persistent_gnt->node);
1258                         if (persistent_gnt->gref != GRANT_INVALID_REF) {
1259                                 gnttab_end_foreign_access(persistent_gnt->gref,
1260                                                           0, 0UL);
1261                                 rinfo->persistent_gnts_c--;
1262                         }
1263                         if (info->feature_persistent)
1264                                 __free_page(persistent_gnt->page);
1265                         kfree(persistent_gnt);
1266                 }
1267         }
1268         BUG_ON(rinfo->persistent_gnts_c != 0);
1269
1270         for (i = 0; i < BLK_RING_SIZE(info); i++) {
1271                 /*
1272                  * Clear persistent grants present in requests already
1273                  * on the shared ring
1274                  */
1275                 if (!rinfo->shadow[i].request)
1276                         goto free_shadow;
1277
1278                 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1279                        rinfo->shadow[i].req.u.indirect.nr_segments :
1280                        rinfo->shadow[i].req.u.rw.nr_segments;
1281                 for (j = 0; j < segs; j++) {
1282                         persistent_gnt = rinfo->shadow[i].grants_used[j];
1283                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1284                         if (info->feature_persistent)
1285                                 __free_page(persistent_gnt->page);
1286                         kfree(persistent_gnt);
1287                 }
1288
1289                 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1290                         /*
1291                          * If this is not an indirect operation don't try to
1292                          * free indirect segments
1293                          */
1294                         goto free_shadow;
1295
1296                 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1297                         persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1298                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1299                         __free_page(persistent_gnt->page);
1300                         kfree(persistent_gnt);
1301                 }
1302
1303 free_shadow:
1304                 kfree(rinfo->shadow[i].grants_used);
1305                 rinfo->shadow[i].grants_used = NULL;
1306                 kfree(rinfo->shadow[i].indirect_grants);
1307                 rinfo->shadow[i].indirect_grants = NULL;
1308                 kfree(rinfo->shadow[i].sg);
1309                 rinfo->shadow[i].sg = NULL;
1310         }
1311
1312         /* No more gnttab callback work. */
1313         gnttab_cancel_free_callback(&rinfo->callback);
1314
1315         /* Flush gnttab callback work. Must be done with no locks held. */
1316         flush_work(&rinfo->work);
1317
1318         /* Free resources associated with old device channel. */
1319         for (i = 0; i < info->nr_ring_pages; i++) {
1320                 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1321                         gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1322                         rinfo->ring_ref[i] = GRANT_INVALID_REF;
1323                 }
1324         }
1325         free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
1326         rinfo->ring.sring = NULL;
1327
1328         if (rinfo->irq)
1329                 unbind_from_irqhandler(rinfo->irq, rinfo);
1330         rinfo->evtchn = rinfo->irq = 0;
1331 }
1332
1333 static void blkif_free(struct blkfront_info *info, int suspend)
1334 {
1335         unsigned int i;
1336
1337         /* Prevent new requests being issued until we fix things up. */
1338         info->connected = suspend ?
1339                 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1340         /* No more blkif_request(). */
1341         if (info->rq)
1342                 blk_mq_stop_hw_queues(info->rq);
1343
1344         for (i = 0; i < info->nr_rings; i++)
1345                 blkif_free_ring(&info->rinfo[i]);
1346
1347         kfree(info->rinfo);
1348         info->rinfo = NULL;
1349         info->nr_rings = 0;
1350 }
1351
1352 struct copy_from_grant {
1353         const struct blk_shadow *s;
1354         unsigned int grant_idx;
1355         unsigned int bvec_offset;
1356         char *bvec_data;
1357 };
1358
1359 static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1360                                   unsigned int len, void *data)
1361 {
1362         struct copy_from_grant *info = data;
1363         char *shared_data;
1364         /* Convenient aliases */
1365         const struct blk_shadow *s = info->s;
1366
1367         shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1368
1369         memcpy(info->bvec_data + info->bvec_offset,
1370                shared_data + offset, len);
1371
1372         info->bvec_offset += len;
1373         info->grant_idx++;
1374
1375         kunmap_atomic(shared_data);
1376 }
1377
1378 static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1379 {
1380         switch (rsp)
1381         {
1382         case BLKIF_RSP_OKAY:
1383                 return REQ_DONE;
1384         case BLKIF_RSP_EOPNOTSUPP:
1385                 return REQ_EOPNOTSUPP;
1386         case BLKIF_RSP_ERROR:
1387                 /* Fallthrough. */
1388         default:
1389                 return REQ_ERROR;
1390         }
1391 }
1392
1393 /*
1394  * Get the final status of the block request based on the two ring responses.
1395  */
1396 static int blkif_get_final_status(enum blk_req_status s1,
1397                                   enum blk_req_status s2)
1398 {
1399         BUG_ON(s1 == REQ_WAITING);
1400         BUG_ON(s2 == REQ_WAITING);
1401
1402         if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1403                 return BLKIF_RSP_ERROR;
1404         else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1405                 return BLKIF_RSP_EOPNOTSUPP;
1406         return BLKIF_RSP_OKAY;
1407 }
1408
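/*
 * Per-response completion work: if the request was split in two, merge the
 * status of both halves; copy read data out of persistent grants; then
 * return every grant (data and indirect) to the free list, ending foreign
 * access for grants the backend no longer has mapped.
 *
 * Returns 0 if we still have to wait for the second half of a split
 * request, 1 once the request can be completed.
 */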
1409 static bool blkif_completion(unsigned long *id,
1410                              struct blkfront_ring_info *rinfo,
1411                              struct blkif_response *bret)
1412 {
1413         int i = 0;
1414         struct scatterlist *sg;
1415         int num_sg, num_grant;
1416         struct blkfront_info *info = rinfo->dev_info;
1417         struct blk_shadow *s = &rinfo->shadow[*id];
1418         struct copy_from_grant data = {
1419                 .grant_idx = 0,
1420         };
1421
1422         num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1423                 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1424
1425         /* The I/O request may be split in two. */
1426         if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1427                 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1428
1429                 /* Keep the status of the current response in shadow. */
1430                 s->status = blkif_rsp_to_req_status(bret->status);
1431
1432                 /* Wait for the second response if it is not here yet. */
1433                 if (s2->status == REQ_WAITING)
1434                         return 0;
1435
1436                 bret->status = blkif_get_final_status(s->status,
1437                                                       s2->status);
1438
1439                 /*
1440                  * All the grants are stored in the first shadow in order
1441                  * to make the completion code simpler.
1442                  */
1443                 num_grant += s2->req.u.rw.nr_segments;
1444
1445                 /*
1446                  * The two responses may not come in order. Only the
1447                  * first request will store the scatter-gather list.
1448                  */
1449                 if (s2->num_sg != 0) {
1450                         /* Update "id" with the ID of the first response. */
1451                         *id = s->associated_id;
1452                         s = s2;
1453                 }
1454
1455                 /*
1456                  * We don't need the second request anymore, so recycle
1457                  * it now.
1458                  */
1459                 if (add_id_to_freelist(rinfo, s->associated_id))
1460                         WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1461                              info->gd->disk_name, s->associated_id);
1462         }
1463
1464         data.s = s;
1465         num_sg = s->num_sg;
1466
1467         if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1468                 for_each_sg(s->sg, sg, num_sg, i) {
1469                         BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1470
1471                         data.bvec_offset = sg->offset;
1472                         data.bvec_data = kmap_atomic(sg_page(sg));
1473
1474                         gnttab_foreach_grant_in_range(sg_page(sg),
1475                                                       sg->offset,
1476                                                       sg->length,
1477                                                       blkif_copy_from_grant,
1478                                                       &data);
1479
1480                         kunmap_atomic(data.bvec_data);
1481                 }
1482         }
1483         /* Add the used grants back to the list of free grants. */
1484         for (i = 0; i < num_grant; i++) {
1485                 if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
1486                         /*
1487                          * If the grant is still mapped by the backend (the
1488                          * backend has chosen to make this grant persistent)
1489                          * we add it at the head of the list, so it will be
1490                          * reused first.
1491                          */
1492                         if (!info->feature_persistent)
1493                                 pr_alert_ratelimited("backend has not unmapped grant: %u\n",
1494                                                      s->grants_used[i]->gref);
1495                         list_add(&s->grants_used[i]->node, &rinfo->grants);
1496                         rinfo->persistent_gnts_c++;
1497                 } else {
1498                         /*
1499                          * If the grant is not mapped by the backend we end the
1500                          * foreign access and add it to the tail of the list,
1501                          * so it will not be picked again unless we run out of
1502                          * persistent grants.
1503                          */
1504                         gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
1505                         s->grants_used[i]->gref = GRANT_INVALID_REF;
1506                         list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1507                 }
1508         }
1509         if (s->req.operation == BLKIF_OP_INDIRECT) {
1510                 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1511                         if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
1512                                 if (!info->feature_persistent)
1513                                         pr_alert_ratelimited("backend has not unmapped grant: %u\n",
1514                                                              s->indirect_grants[i]->gref);
1515                                 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1516                                 rinfo->persistent_gnts_c++;
1517                         } else {
1518                                 struct page *indirect_page;
1519
1520                                 gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
1521                                 /*
1522                                  * Add the used indirect page back to the list of
1523                                  * available pages for indirect grefs.
1524                                  */
1525                                 if (!info->feature_persistent) {
1526                                         indirect_page = s->indirect_grants[i]->page;
1527                                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
1528                                 }
1529                                 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1530                                 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1531                         }
1532                 }
1533         }
1534
1535         return 1;
1536 }
1537
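/*
 * Ring interrupt handler. With rinfo->ring_lock held, consume all pending
 * responses from the shared ring, recycle the shadow/request ids and
 * complete the corresponding blk-mq requests, then re-kick the queue.
 */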
1538 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1539 {
1540         struct request *req;
1541         struct blkif_response *bret;
1542         RING_IDX i, rp;
1543         unsigned long flags;
1544         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1545         struct blkfront_info *info = rinfo->dev_info;
1546         int error;
1547
1548         if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
1549                 return IRQ_HANDLED;
1550
1551         spin_lock_irqsave(&rinfo->ring_lock, flags);
1552  again:
1553         rp = rinfo->ring.sring->rsp_prod;
1554         rmb(); /* Ensure we see queued responses up to 'rp'. */
1555
1556         for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1557                 unsigned long id;
1558
1559                 bret = RING_GET_RESPONSE(&rinfo->ring, i);
1560                 id   = bret->id;
1561                 /*
1562                  * The backend has messed up and given us an id that we would
1563                  * never have given to it (we stamp it up to BLK_RING_SIZE -
1564                  * look in get_id_from_freelist).
1565                  */
1566                 if (id >= BLK_RING_SIZE(info)) {
1567                         WARN(1, "%s: response to %s has incorrect id (%ld)\n",
1568                              info->gd->disk_name, op_name(bret->operation), id);
1569                         /* We can't safely get the 'struct request' as
1570                          * the id is busted. */
1571                         continue;
1572                 }
1573                 req  = rinfo->shadow[id].request;
1574
1575                 if (bret->operation != BLKIF_OP_DISCARD) {
1576                         /*
1577                          * We may need to wait for an extra response if the
1578                          * I/O request is split in 2
1579                          */
1580                         if (!blkif_completion(&id, rinfo, bret))
1581                                 continue;
1582                 }
1583
1584                 if (add_id_to_freelist(rinfo, id)) {
1585                         WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1586                              info->gd->disk_name, op_name(bret->operation), id);
1587                         continue;
1588                 }
1589
1590                 error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
1591                 switch (bret->operation) {
1592                 case BLKIF_OP_DISCARD:
1593                         if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1594                                 struct request_queue *rq = info->rq;
1595                                 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1596                                            info->gd->disk_name, op_name(bret->operation));
1597                                 error = -EOPNOTSUPP;
1598                                 info->feature_discard = 0;
1599                                 info->feature_secdiscard = 0;
1600                                 queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1601                                 queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1602                         }
1603                         blk_mq_complete_request(req, error);
1604                         break;
1605                 case BLKIF_OP_FLUSH_DISKCACHE:
1606                 case BLKIF_OP_WRITE_BARRIER:
1607                         if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1608                                 printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1609                                        info->gd->disk_name, op_name(bret->operation));
1610                                 error = -EOPNOTSUPP;
1611                         }
1612                         if (unlikely(bret->status == BLKIF_RSP_ERROR &&
1613                                      rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1614                                 printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
1615                                        info->gd->disk_name, op_name(bret->operation));
1616                                 error = -EOPNOTSUPP;
1617                         }
1618                         if (unlikely(error)) {
1619                                 if (error == -EOPNOTSUPP)
1620                                         error = 0;
1621                                 info->feature_fua = 0;
1622                                 info->feature_flush = 0;
1623                                 xlvbd_flush(info);
1624                         }
1625                         /* fall through */
1626                 case BLKIF_OP_READ:
1627                 case BLKIF_OP_WRITE:
1628                         if (unlikely(bret->status != BLKIF_RSP_OKAY))
1629                                 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
1630                                         "request: %x\n", bret->status);
1631
1632                         blk_mq_complete_request(req, error);
1633                         break;
1634                 default:
1635                         BUG();
1636                 }
1637         }
1638
1639         rinfo->ring.rsp_cons = i;
1640
1641         if (i != rinfo->ring.req_prod_pvt) {
1642                 int more_to_do;
1643                 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1644                 if (more_to_do)
1645                         goto again;
1646         } else
1647                 rinfo->ring.sring->rsp_event = i + 1;
1648
1649         kick_pending_request_queues_locked(rinfo);
1650
1651         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1652
1653         return IRQ_HANDLED;
1654 }
1655
1656
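/*
 * Allocate the shared ring pages for one queue, grant them to the backend
 * and bind a fresh event channel to blkif_interrupt(). On failure, all ring
 * resources of the device are torn down via blkif_free().
 */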
1657 static int setup_blkring(struct xenbus_device *dev,
1658                          struct blkfront_ring_info *rinfo)
1659 {
1660         struct blkif_sring *sring;
1661         int err, i;
1662         struct blkfront_info *info = rinfo->dev_info;
1663         unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1664         grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1665
1666         for (i = 0; i < info->nr_ring_pages; i++)
1667                 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1668
1669         sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
1670                                                        get_order(ring_size));
1671         if (!sring) {
1672                 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1673                 return -ENOMEM;
1674         }
1675         SHARED_RING_INIT(sring);
1676         FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1677
1678         err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1679         if (err < 0) {
1680                 free_pages((unsigned long)sring, get_order(ring_size));
1681                 rinfo->ring.sring = NULL;
1682                 goto fail;
1683         }
1684         for (i = 0; i < info->nr_ring_pages; i++)
1685                 rinfo->ring_ref[i] = gref[i];
1686
1687         err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1688         if (err)
1689                 goto fail;
1690
1691         err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
1692                                         "blkif", rinfo);
1693         if (err <= 0) {
1694                 xenbus_dev_fatal(dev, err,
1695                                  "bind_evtchn_to_irqhandler failed");
1696                 goto fail;
1697         }
1698         rinfo->irq = err;
1699
1700         return 0;
1701 fail:
1702         blkif_free(info, 0);
1703         return err;
1704 }
1705
1706 /*
1707  * Write out the per-ring/queue nodes, including ring-ref and event-channel; each
1708  * ring buffer may span multiple pages depending on ->nr_ring_pages.
1709  */
1710 static int write_per_ring_nodes(struct xenbus_transaction xbt,
1711                                 struct blkfront_ring_info *rinfo, const char *dir)
1712 {
1713         int err;
1714         unsigned int i;
1715         const char *message = NULL;
1716         struct blkfront_info *info = rinfo->dev_info;
1717
1718         if (info->nr_ring_pages == 1) {
1719                 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1720                 if (err) {
1721                         message = "writing ring-ref";
1722                         goto abort_transaction;
1723                 }
1724         } else {
1725                 for (i = 0; i < info->nr_ring_pages; i++) {
1726                         char ring_ref_name[RINGREF_NAME_LEN];
1727
1728                         snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1729                         err = xenbus_printf(xbt, dir, ring_ref_name,
1730                                             "%u", rinfo->ring_ref[i]);
1731                         if (err) {
1732                                 message = "writing ring-ref";
1733                                 goto abort_transaction;
1734                         }
1735                 }
1736         }
1737
1738         err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1739         if (err) {
1740                 message = "writing event-channel";
1741                 goto abort_transaction;
1742         }
1743
1744         return 0;
1745
1746 abort_transaction:
1747         xenbus_transaction_end(xbt, 1);
1748         if (message)
1749                 xenbus_dev_fatal(info->xbdev, err, "%s", message);
1750
1751         return err;
1752 }
1753
1754 /* Common code used when first setting up, and when resuming. */
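/*
 * Illustrative xenstore layout (made-up values) written below for a
 * frontend negotiating two queues with single-page rings:
 *
 *   <nodename>/multi-queue-num-queues = "2"
 *   <nodename>/queue-0/ring-ref       = "8"
 *   <nodename>/queue-0/event-channel  = "15"
 *   <nodename>/queue-1/ring-ref       = "9"
 *   <nodename>/queue-1/event-channel  = "16"
 *   <nodename>/protocol               = XEN_IO_PROTO_ABI_NATIVE
 *   <nodename>/feature-persistent     = "1"
 *
 * With a single queue the ring-ref/event-channel nodes are written directly
 * under <nodename>; with multi-page rings, ring-page-order and ring-ref%u
 * nodes are used instead of ring-ref.
 */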
1755 static int talk_to_blkback(struct xenbus_device *dev,
1756                            struct blkfront_info *info)
1757 {
1758         const char *message = NULL;
1759         struct xenbus_transaction xbt;
1760         int err;
1761         unsigned int i, max_page_order;
1762         unsigned int ring_page_order;
1763
1764         max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1765                                               "max-ring-page-order", 0);
1766         ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1767         info->nr_ring_pages = 1 << ring_page_order;
1768
1769         for (i = 0; i < info->nr_rings; i++) {
1770                 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1771
1772                 /* Create shared ring, alloc event channel. */
1773                 err = setup_blkring(dev, rinfo);
1774                 if (err)
1775                         goto destroy_blkring;
1776         }
1777
1778 again:
1779         err = xenbus_transaction_start(&xbt);
1780         if (err) {
1781                 xenbus_dev_fatal(dev, err, "starting transaction");
1782                 goto destroy_blkring;
1783         }
1784
1785         if (info->nr_ring_pages > 1) {
1786                 err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1787                                     ring_page_order);
1788                 if (err) {
1789                         message = "writing ring-page-order";
1790                         goto abort_transaction;
1791                 }
1792         }
1793
1794         /* We already got the number of queues/rings in _probe */
1795         if (info->nr_rings == 1) {
1796                 err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
1797                 if (err)
1798                         goto destroy_blkring;
1799         } else {
1800                 char *path;
1801                 size_t pathsize;
1802
1803                 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1804                                     info->nr_rings);
1805                 if (err) {
1806                         message = "writing multi-queue-num-queues";
1807                         goto abort_transaction;
1808                 }
1809
1810                 pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1811                 path = kmalloc(pathsize, GFP_KERNEL);
1812                 if (!path) {
1813                         err = -ENOMEM;
1814                         message = "ENOMEM while writing ring references";
1815                         goto abort_transaction;
1816                 }
1817
1818                 for (i = 0; i < info->nr_rings; i++) {
1819                         memset(path, 0, pathsize);
1820                         snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1821                         err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
1822                         if (err) {
1823                                 kfree(path);
1824                                 goto destroy_blkring;
1825                         }
1826                 }
1827                 kfree(path);
1828         }
1829         err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1830                             XEN_IO_PROTO_ABI_NATIVE);
1831         if (err) {
1832                 message = "writing protocol";
1833                 goto abort_transaction;
1834         }
1835         err = xenbus_printf(xbt, dev->nodename,
1836                             "feature-persistent", "%u", 1);
1837         if (err)
1838                 dev_warn(&dev->dev,
1839                          "writing persistent grants feature to xenbus");
1840
1841         err = xenbus_transaction_end(xbt, 0);
1842         if (err) {
1843                 if (err == -EAGAIN)
1844                         goto again;
1845                 xenbus_dev_fatal(dev, err, "completing transaction");
1846                 goto destroy_blkring;
1847         }
1848
1849         for (i = 0; i < info->nr_rings; i++) {
1850                 unsigned int j;
1851                 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1852
1853                 for (j = 0; j < BLK_RING_SIZE(info); j++)
1854                         rinfo->shadow[j].req.u.rw.id = j + 1;
1855                 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1856         }
1857         xenbus_switch_state(dev, XenbusStateInitialised);
1858
1859         return 0;
1860
1861  abort_transaction:
1862         xenbus_transaction_end(xbt, 1);
1863         if (message)
1864                 xenbus_dev_fatal(dev, err, "%s", message);
1865  destroy_blkring:
1866         blkif_free(info, 0);
1867
1868         kfree(info);
1869         dev_set_drvdata(&dev->dev, NULL);
1870
1871         return err;
1872 }
1873
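/*
 * Read "multi-queue-max-queues" from the backend, clamp it against the
 * xen_blkif_max_queues module parameter (minimum one ring), and allocate
 * and initialise the per-ring blkfront_ring_info array.
 */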
1874 static int negotiate_mq(struct blkfront_info *info)
1875 {
1876         unsigned int backend_max_queues;
1877         unsigned int i;
1878
1879         BUG_ON(info->nr_rings);
1880
1881         /* Check if backend supports multiple queues. */
1882         backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1883                                                   "multi-queue-max-queues", 1);
1884         info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1885         /* We need at least one ring. */
1886         if (!info->nr_rings)
1887                 info->nr_rings = 1;
1888
1889         info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1890         if (!info->rinfo) {
1891                 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1892                 return -ENOMEM;
1893         }
1894
1895         for (i = 0; i < info->nr_rings; i++) {
1896                 struct blkfront_ring_info *rinfo;
1897
1898                 rinfo = &info->rinfo[i];
1899                 INIT_LIST_HEAD(&rinfo->indirect_pages);
1900                 INIT_LIST_HEAD(&rinfo->grants);
1901                 rinfo->dev_info = info;
1902                 INIT_WORK(&rinfo->work, blkif_restart_queue);
1903                 spin_lock_init(&rinfo->ring_lock);
1904         }
1905         return 0;
1906 }
1907 /**
1908  * Entry point to this code when a new device is created.  Allocate the basic
1909  * structures and the ring buffer for communication with the backend, and
1910  * inform the backend of the appropriate details for those.  Switch to
1911  * Initialised state.
1912  */
1913 static int blkfront_probe(struct xenbus_device *dev,
1914                           const struct xenbus_device_id *id)
1915 {
1916         int err, vdevice;
1917         struct blkfront_info *info;
1918
1919         /* FIXME: Use dynamic device id if this is not set. */
1920         err = xenbus_scanf(XBT_NIL, dev->nodename,
1921                            "virtual-device", "%i", &vdevice);
1922         if (err != 1) {
1923                 /* go looking in the extended area instead */
1924                 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
1925                                    "%i", &vdevice);
1926                 if (err != 1) {
1927                         xenbus_dev_fatal(dev, err, "reading virtual-device");
1928                         return err;
1929                 }
1930         }
1931
1932         if (xen_hvm_domain()) {
1933                 char *type;
1934                 int len;
1935                 /* no unplug has been done: do not hook devices != xen vbds */
1936                 if (xen_has_pv_and_legacy_disk_devices()) {
1937                         int major;
1938
1939                         if (!VDEV_IS_EXTENDED(vdevice))
1940                                 major = BLKIF_MAJOR(vdevice);
1941                         else
1942                                 major = XENVBD_MAJOR;
1943
1944                         if (major != XENVBD_MAJOR) {
1945                                 printk(KERN_INFO
1946                                                 "%s: HVM does not support vbd %d as xen block device\n",
1947                                                 __func__, vdevice);
1948                                 return -ENODEV;
1949                         }
1950                 }
1951                 /* do not create a PV cdrom device if we are an HVM guest */
1952                 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
1953                 if (IS_ERR(type))
1954                         return -ENODEV;
1955                 if (strncmp(type, "cdrom", 5) == 0) {
1956                         kfree(type);
1957                         return -ENODEV;
1958                 }
1959                 kfree(type);
1960         }
1961         info = kzalloc(sizeof(*info), GFP_KERNEL);
1962         if (!info) {
1963                 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
1964                 return -ENOMEM;
1965         }
1966
1967         info->xbdev = dev;
1968         err = negotiate_mq(info);
1969         if (err) {
1970                 kfree(info);
1971                 return err;
1972         }
1973
1974         mutex_init(&info->mutex);
1975         info->vdevice = vdevice;
1976         info->connected = BLKIF_STATE_DISCONNECTED;
1977
1978         /* Front end dir is a number, which is used as the id. */
1979         info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
1980         dev_set_drvdata(&dev->dev, info);
1981
1982         return 0;
1983 }
1984
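/*
 * Completion handler for the clones created in blkif_recover() when a
 * pending bio exceeds the (possibly smaller) post-resume segment limit:
 * end the original "parent" bio once the last clone has finished.
 */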
1985 static void split_bio_end(struct bio *bio)
1986 {
1987         struct split_bio *split_bio = bio->bi_private;
1988
1989         if (atomic_dec_and_test(&split_bio->pending)) {
1990                 split_bio->bio->bi_phys_segments = 0;
1991                 split_bio->bio->bi_error = bio->bi_error;
1992                 bio_endio(split_bio->bio);
1993                 kfree(split_bio);
1994         }
1995         bio_put(bio);
1996 }
1997
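/*
 * Post-resume recovery: re-read the backend features, re-apply the queue
 * limits, re-allocate the per-ring indirect/grant buffers, then requeue the
 * requests and bios that were saved off in blkfront_resume(), splitting any
 * bio that no longer fits in a single request.
 */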
1998 static int blkif_recover(struct blkfront_info *info)
1999 {
2000         unsigned int i, r_index;
2001         struct request *req, *n;
2002         int rc;
2003         struct bio *bio, *cloned_bio;
2004         unsigned int segs, offset;
2005         int pending, size;
2006         struct split_bio *split_bio;
2007
2008         blkfront_gather_backend_features(info);
2009         /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
2010         blkif_set_queue_limits(info);
2011         segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2012         blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
2013
2014         for (r_index = 0; r_index < info->nr_rings; r_index++) {
2015                 struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
2016
2017                 rc = blkfront_setup_indirect(rinfo);
2018                 if (rc)
2019                         return rc;
2020         }
2021         xenbus_switch_state(info->xbdev, XenbusStateConnected);
2022
2023         /* Now safe for us to use the shared ring */
2024         info->connected = BLKIF_STATE_CONNECTED;
2025
2026         for (r_index = 0; r_index < info->nr_rings; r_index++) {
2027                 struct blkfront_ring_info *rinfo;
2028
2029                 rinfo = &info->rinfo[r_index];
2030                 /* Kick any other new requests queued since we resumed */
2031                 kick_pending_request_queues(rinfo);
2032         }
2033
2034         list_for_each_entry_safe(req, n, &info->requests, queuelist) {
2035                 /* Requeue pending requests (flush or discard) */
2036                 list_del_init(&req->queuelist);
2037                 BUG_ON(req->nr_phys_segments > segs);
2038                 blk_mq_requeue_request(req, false);
2039         }
2040         blk_mq_start_stopped_hw_queues(info->rq, true);
2041         blk_mq_kick_requeue_list(info->rq);
2042
2043         while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
2044                 /* Traverse the list of pending bios and re-queue them */
2045                 if (bio_segments(bio) > segs) {
2046                         /*
2047                          * This bio has more segments than what we can
2048                          * handle, we have to split it.
2049                          */
2050                         pending = (bio_segments(bio) + segs - 1) / segs;
2051                         split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
2052                         BUG_ON(split_bio == NULL);
2053                         atomic_set(&split_bio->pending, pending);
2054                         split_bio->bio = bio;
2055                         for (i = 0; i < pending; i++) {
2056                                 offset = (i * segs * XEN_PAGE_SIZE) >> 9;
2057                                 size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
2058                                            (unsigned int)bio_sectors(bio) - offset);
2059                                 cloned_bio = bio_clone(bio, GFP_NOIO);
2060                                 BUG_ON(cloned_bio == NULL);
2061                                 bio_trim(cloned_bio, offset, size);
2062                                 cloned_bio->bi_private = split_bio;
2063                                 cloned_bio->bi_end_io = split_bio_end;
2064                                 submit_bio(cloned_bio);
2065                         }
2066                         /*
2067                          * Now we have to wait for all those smaller bios to
2068                          * end, so we can also end the "parent" bio.
2069                          */
2070                         continue;
2071                 }
2072                 /* We don't need to split this bio */
2073                 submit_bio(bio);
2074         }
2075
2076         return 0;
2077 }
2078
2079 /**
2080  * We are reconnecting to the backend, due to a suspend/resume, or a backend
2081  * driver restart.  We tear down our blkif structure and recreate it, but
2082  * leave the device-layer structures intact so that this is transparent to the
2083  * rest of the kernel.
2084  */
2085 static int blkfront_resume(struct xenbus_device *dev)
2086 {
2087         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2088         int err = 0;
2089         unsigned int i, j;
2090
2091         dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2092
2093         bio_list_init(&info->bio_list);
2094         INIT_LIST_HEAD(&info->requests);
2095         for (i = 0; i < info->nr_rings; i++) {
2096                 struct blkfront_ring_info *rinfo = &info->rinfo[i];
2097                 struct bio_list merge_bio;
2098                 struct blk_shadow *shadow = rinfo->shadow;
2099
2100                 for (j = 0; j < BLK_RING_SIZE(info); j++) {
2101                         /* Not in use? */
2102                         if (!shadow[j].request)
2103                                 continue;
2104
2105                         /*
2106                          * Get the bios in the request so we can re-queue them.
2107                          */
2108                         if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
2109                             req_op(shadow[j].request) == REQ_OP_DISCARD ||
2110                             req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
2111                             shadow[j].request->cmd_flags & REQ_FUA) {
2112                                 /*
2113                                  * Flush operations don't contain bios, so
2114                                  * we need to requeue the whole request
2115                                  *
2116                                  * XXX: but this doesn't make any sense for a
2117                                  * write with the FUA flag set.
2118                                  */
2119                                 list_add(&shadow[j].request->queuelist, &info->requests);
2120                                 continue;
2121                         }
2122                         merge_bio.head = shadow[j].request->bio;
2123                         merge_bio.tail = shadow[j].request->biotail;
2124                         bio_list_merge(&info->bio_list, &merge_bio);
2125                         shadow[j].request->bio = NULL;
2126                         blk_mq_end_request(shadow[j].request, 0);
2127                 }
2128         }
2129
2130         blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2131
2132         err = negotiate_mq(info);
2133         if (err)
2134                 return err;
2135
2136         err = talk_to_blkback(dev, info);
2137         if (!err)
2138                 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2139
2140         /*
2141          * We have to wait for the backend to switch to
2142          * connected state, since we want to read which
2143          * features it supports.
2144          */
2145
2146         return err;
2147 }
2148
2149 static void blkfront_closing(struct blkfront_info *info)
2150 {
2151         struct xenbus_device *xbdev = info->xbdev;
2152         struct block_device *bdev = NULL;
2153
2154         mutex_lock(&info->mutex);
2155
2156         if (xbdev->state == XenbusStateClosing) {
2157                 mutex_unlock(&info->mutex);
2158                 return;
2159         }
2160
2161         if (info->gd)
2162                 bdev = bdget_disk(info->gd, 0);
2163
2164         mutex_unlock(&info->mutex);
2165
2166         if (!bdev) {
2167                 xenbus_frontend_closed(xbdev);
2168                 return;
2169         }
2170
2171         mutex_lock(&bdev->bd_mutex);
2172
2173         if (bdev->bd_openers) {
2174                 xenbus_dev_error(xbdev, -EBUSY,
2175                                  "Device in use; refusing to close");
2176                 xenbus_switch_state(xbdev, XenbusStateClosing);
2177         } else {
2178                 xlvbd_release_gendisk(info);
2179                 xenbus_frontend_closed(xbdev);
2180         }
2181
2182         mutex_unlock(&bdev->bd_mutex);
2183         bdput(bdev);
2184 }
2185
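/*
 * Discard is advertised by the backend; pick up the optional granularity,
 * alignment and secure-erase ("discard-secure") properties from xenstore.
 */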
2186 static void blkfront_setup_discard(struct blkfront_info *info)
2187 {
2188         int err;
2189         unsigned int discard_granularity;
2190         unsigned int discard_alignment;
2191
2192         info->feature_discard = 1;
2193         err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2194                 "discard-granularity", "%u", &discard_granularity,
2195                 "discard-alignment", "%u", &discard_alignment,
2196                 NULL);
2197         if (!err) {
2198                 info->discard_granularity = discard_granularity;
2199                 info->discard_alignment = discard_alignment;
2200         }
2201         info->feature_secdiscard =
2202                 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
2203                                        0);
2204 }
2205
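/*
 * Size the per-request grant and scatterlist arrays for one ring. With
 * indirect descriptors, the backend's "feature-max-indirect-segments" limit
 * applies; without them we fall back to BLKIF_MAX_SEGMENTS_PER_REQUEST (or
 * GRANTS_PER_PSEG when an extra request may be needed because a single
 * Linux page does not fit in one blkif request). Also pre-allocates the
 * grant buffer and, when not using persistent grants, the pages backing
 * indirect grefs.
 */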
2206 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2207 {
2208         unsigned int psegs, grants;
2209         int err, i;
2210         struct blkfront_info *info = rinfo->dev_info;
2211
2212         if (info->max_indirect_segments == 0) {
2213                 if (!HAS_EXTRA_REQ)
2214                         grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2215                 else {
2216                         /*
2217                          * When an extra req is required, the maximum
2218                          * number of grants supported is determined by the size of the
2219                          * Linux block segment.
2220                          */
2221                         grants = GRANTS_PER_PSEG;
2222                 }
2223         }
2224         else
2225                 grants = info->max_indirect_segments;
2226         psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
2227
2228         err = fill_grant_buffer(rinfo,
2229                                 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
2230         if (err)
2231                 goto out_of_memory;
2232
2233         if (!info->feature_persistent && info->max_indirect_segments) {
2234                 /*
2235                  * We are using indirect descriptors but not persistent
2236                  * grants, so we need to allocate a set of pages that can be
2237                  * used for mapping indirect grefs.
2238                  */
2239                 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
2240
2241                 BUG_ON(!list_empty(&rinfo->indirect_pages));
2242                 for (i = 0; i < num; i++) {
2243                         struct page *indirect_page = alloc_page(GFP_NOIO);
2244                         if (!indirect_page)
2245                                 goto out_of_memory;
2246                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
2247                 }
2248         }
2249
2250         for (i = 0; i < BLK_RING_SIZE(info); i++) {
2251                 rinfo->shadow[i].grants_used = kzalloc(
2252                         sizeof(rinfo->shadow[i].grants_used[0]) * grants,
2253                         GFP_NOIO);
2254                 rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
2255                 if (info->max_indirect_segments)
2256                         rinfo->shadow[i].indirect_grants = kzalloc(
2257                                 sizeof(rinfo->shadow[i].indirect_grants[0]) *
2258                                 INDIRECT_GREFS(grants),
2259                                 GFP_NOIO);
2260                 if ((rinfo->shadow[i].grants_used == NULL) ||
2261                         (rinfo->shadow[i].sg == NULL) ||
2262                      (info->max_indirect_segments &&
2263                      (rinfo->shadow[i].indirect_grants == NULL)))
2264                         goto out_of_memory;
2265                 sg_init_table(rinfo->shadow[i].sg, psegs);
2266         }
2267
2268
2269         return 0;
2270
2271 out_of_memory:
2272         for (i = 0; i < BLK_RING_SIZE(info); i++) {
2273                 kfree(rinfo->shadow[i].grants_used);
2274                 rinfo->shadow[i].grants_used = NULL;
2275                 kfree(rinfo->shadow[i].sg);
2276                 rinfo->shadow[i].sg = NULL;
2277                 kfree(rinfo->shadow[i].indirect_grants);
2278                 rinfo->shadow[i].indirect_grants = NULL;
2279         }
2280         if (!list_empty(&rinfo->indirect_pages)) {
2281                 struct page *indirect_page, *n;
2282                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
2283                         list_del(&indirect_page->lru);
2284                         __free_page(indirect_page);
2285                 }
2286         }
2287         return -ENOMEM;
2288 }
2289
2290 /*
2291  * Gather all backend feature-*
2292  */
2293 static void blkfront_gather_backend_features(struct blkfront_info *info)
2294 {
2295         unsigned int indirect_segments;
2296
2297         info->feature_flush = 0;
2298         info->feature_fua = 0;
2299
2300         /*
2301          * If there's no "feature-barrier" defined, then it means
2302          * we're dealing with a very old backend which writes
2303          * synchronously; nothing to do.
2304          *
2305          * If there are barriers, then we use flush.
2306          */
2307         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
2308                 info->feature_flush = 1;
2309                 info->feature_fua = 1;
2310         }
2311
2312         /*
2313          * And if "feature-flush-cache" is present, prefer it over
2314          * barriers.
2315          */
2316         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
2317                                  0)) {
2318                 info->feature_flush = 1;
2319                 info->feature_fua = 0;
2320         }
2321
2322         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
2323                 blkfront_setup_discard(info);
2324
2325         info->feature_persistent =
2326                 !!xenbus_read_unsigned(info->xbdev->otherend,
2327                                        "feature-persistent", 0);
2328
2329         indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
2330                                         "feature-max-indirect-segments", 0);
2331         if (indirect_segments > xen_blkif_max_segments)
2332                 indirect_segments = xen_blkif_max_segments;
2333         if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2334                 indirect_segments = 0;
2335         info->max_indirect_segments = indirect_segments;
2336 }
2337
2338 /*
2339  * Invoked when the backend is finally 'ready' (and has produced
2340  * the details about the physical device - #sectors, size, etc).
2341  */
2342 static void blkfront_connect(struct blkfront_info *info)
2343 {
2344         unsigned long long sectors;
2345         unsigned long sector_size;
2346         unsigned int physical_sector_size;
2347         unsigned int binfo;
2348         int err, i;
2349
2350         switch (info->connected) {
2351         case BLKIF_STATE_CONNECTED:
2352                 /*
2353                  * Potentially, the back-end may be signalling
2354                  * a capacity change; update the capacity.
2355                  */
2356                 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2357                                    "sectors", "%Lu", &sectors);
2358                 if (XENBUS_EXIST_ERR(err))
2359                         return;
2360                 printk(KERN_INFO "Setting capacity to %Lu\n",
2361                        sectors);
2362                 set_capacity(info->gd, sectors);
2363                 revalidate_disk(info->gd);
2364
2365                 return;
2366         case BLKIF_STATE_SUSPENDED:
2367                 /*
2368                  * If we are recovering from suspension, we need to wait
2369                  * for the backend to announce its features before
2370                  * reconnecting; at the very least we need to know whether the
2371                  * backend supports indirect descriptors, and how many.
2372                  */
2373                 blkif_recover(info);
2374                 return;
2375
2376         default:
2377                 break;
2378         }
2379
2380         dev_dbg(&info->xbdev->dev, "%s:%s.\n",
2381                 __func__, info->xbdev->otherend);
2382
2383         err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2384                             "sectors", "%llu", &sectors,
2385                             "info", "%u", &binfo,
2386                             "sector-size", "%lu", &sector_size,
2387                             NULL);
2388         if (err) {
2389                 xenbus_dev_fatal(info->xbdev, err,
2390                                  "reading backend fields at %s",
2391                                  info->xbdev->otherend);
2392                 return;
2393         }
2394
2395         /*
2396          * physical-sector-size is a newer field, so old backends may not
2397          * provide this. Assume physical sector size to be the same as
2398          * sector_size in that case.
2399          */
2400         physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
2401                                                     "physical-sector-size",
2402                                                     sector_size);
2403         blkfront_gather_backend_features(info);
2404         for (i = 0; i < info->nr_rings; i++) {
2405                 err = blkfront_setup_indirect(&info->rinfo[i]);
2406                 if (err) {
2407                         xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
2408                                          info->xbdev->otherend);
2409                         blkif_free(info, 0);
2410                         break;
2411                 }
2412         }
2413
2414         err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
2415                                   physical_sector_size);
2416         if (err) {
2417                 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
2418                                  info->xbdev->otherend);
2419                 goto fail;
2420         }
2421
2422         xenbus_switch_state(info->xbdev, XenbusStateConnected);
2423
2424         /* Kick pending requests. */
2425         info->connected = BLKIF_STATE_CONNECTED;
2426         for (i = 0; i < info->nr_rings; i++)
2427                 kick_pending_request_queues(&info->rinfo[i]);
2428
2429         device_add_disk(&info->xbdev->dev, info->gd);
2430
2431         info->is_ready = 1;
2432         return;
2433
2434 fail:
2435         blkif_free(info, 0);
2436         return;
2437 }
2438
2439 /**
2440  * Callback received when the backend's state changes.
2441  */
2442 static void blkback_changed(struct xenbus_device *dev,
2443                             enum xenbus_state backend_state)
2444 {
2445         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2446
2447         dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
2448
2449         switch (backend_state) {
2450         case XenbusStateInitWait:
2451                 if (dev->state != XenbusStateInitialising)
2452                         break;
2453                 if (talk_to_blkback(dev, info))
2454                         break;
2455         case XenbusStateInitialising:
2456         case XenbusStateInitialised:
2457         case XenbusStateReconfiguring:
2458         case XenbusStateReconfigured:
2459         case XenbusStateUnknown:
2460                 break;
2461
2462         case XenbusStateConnected:
2463                 /*
2464                  * talk_to_blkback sets state to XenbusStateInitialised
2465                  * and blkfront_connect sets it to XenbusStateConnected
2466                  * (if connection went OK).
2467                  *
2468                  * If the backend (or toolstack) decides to poke at backend
2469                  * state (and re-trigger the watch by setting the state repeatedly
2470                  * to XenbusStateConnected (4)) we need to deal with this.
2471                  * This is allowed as this is used to communicate to the guest
2472                  * that the size of disk has changed!
2473                  */
2474                 if ((dev->state != XenbusStateInitialised) &&
2475                     (dev->state != XenbusStateConnected)) {
2476                         if (talk_to_blkback(dev, info))
2477                                 break;
2478                 }
2479
2480                 blkfront_connect(info);
2481                 break;
2482
2483         case XenbusStateClosed:
2484                 if (dev->state == XenbusStateClosed)
2485                         break;
2486                 /* Missed the backend's Closing state -- fallthrough */
2487         case XenbusStateClosing:
2488                 if (info)
2489                         blkfront_closing(info);
2490                 break;
2491         }
2492 }
2493
2494 static int blkfront_remove(struct xenbus_device *xbdev)
2495 {
2496         struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
2497         struct block_device *bdev = NULL;
2498         struct gendisk *disk;
2499
2500         dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
2501
2502         blkif_free(info, 0);
2503
2504         mutex_lock(&info->mutex);
2505
2506         disk = info->gd;
2507         if (disk)
2508                 bdev = bdget_disk(disk, 0);
2509
2510         info->xbdev = NULL;
2511         mutex_unlock(&info->mutex);
2512
2513         if (!bdev) {
2514                 kfree(info);
2515                 return 0;
2516         }
2517
2518         /*
2519          * The xbdev was removed before we reached the Closed
2520          * state. See if it's safe to remove the disk. If the bdev
2521          * isn't closed yet, we let release take care of it.
2522          */
2523
2524         mutex_lock(&bdev->bd_mutex);
2525         info = disk->private_data;
2526
2527         dev_warn(disk_to_dev(disk),
2528                  "%s was hot-unplugged, %d stale handles\n",
2529                  xbdev->nodename, bdev->bd_openers);
2530
2531         if (info && !bdev->bd_openers) {
2532                 xlvbd_release_gendisk(info);
2533                 disk->private_data = NULL;
2534                 kfree(info);
2535         }
2536
2537         mutex_unlock(&bdev->bd_mutex);
2538         bdput(bdev);
2539
2540         return 0;
2541 }
2542
2543 static int blkfront_is_ready(struct xenbus_device *dev)
2544 {
2545         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2546
2547         return info->is_ready && info->xbdev;
2548 }
2549
2550 static int blkif_open(struct block_device *bdev, fmode_t mode)
2551 {
2552         struct gendisk *disk = bdev->bd_disk;
2553         struct blkfront_info *info;
2554         int err = 0;
2555
2556         mutex_lock(&blkfront_mutex);
2557
2558         info = disk->private_data;
2559         if (!info) {
2560                 /* xbdev gone */
2561                 err = -ERESTARTSYS;
2562                 goto out;
2563         }
2564
2565         mutex_lock(&info->mutex);
2566
2567         if (!info->gd)
2568                 /* xbdev is closed */
2569                 err = -ERESTARTSYS;
2570
2571         mutex_unlock(&info->mutex);
2572
2573 out:
2574         mutex_unlock(&blkfront_mutex);
2575         return err;
2576 }
2577
2578 static void blkif_release(struct gendisk *disk, fmode_t mode)
2579 {
2580         struct blkfront_info *info = disk->private_data;
2581         struct block_device *bdev;
2582         struct xenbus_device *xbdev;
2583
2584         mutex_lock(&blkfront_mutex);
2585
2586         bdev = bdget_disk(disk, 0);
2587
2588         if (!bdev) {
2589                 WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2590                 goto out_mutex;
2591         }
2592         if (bdev->bd_openers)
2593                 goto out;
2594
2595         /*
2596          * Check if we have been instructed to close. We will have
2597          * deferred this request, because the bdev was still open.
2598          */
2599
2600         mutex_lock(&info->mutex);
2601         xbdev = info->xbdev;
2602
2603         if (xbdev && xbdev->state == XenbusStateClosing) {
2604                 /* pending switch to state closed */
2605                 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2606                 xlvbd_release_gendisk(info);
2607                 xenbus_frontend_closed(info->xbdev);
2608         }
2609
2610         mutex_unlock(&info->mutex);
2611
2612         if (!xbdev) {
2613                 /* sudden device removal */
2614                 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2615                 xlvbd_release_gendisk(info);
2616                 disk->private_data = NULL;
2617                 kfree(info);
2618         }
2619
2620 out:
2621         bdput(bdev);
2622 out_mutex:
2623         mutex_unlock(&blkfront_mutex);
2624 }
2625
2626 static const struct block_device_operations xlvbd_block_fops =
2627 {
2628         .owner = THIS_MODULE,
2629         .open = blkif_open,
2630         .release = blkif_release,
2631         .getgeo = blkif_getgeo,
2632         .ioctl = blkif_ioctl,
2633 };
2634
2635
2636 static const struct xenbus_device_id blkfront_ids[] = {
2637         { "vbd" },
2638         { "" }
2639 };
2640
2641 static struct xenbus_driver blkfront_driver = {
2642         .ids  = blkfront_ids,
2643         .probe = blkfront_probe,
2644         .remove = blkfront_remove,
2645         .resume = blkfront_resume,
2646         .otherend_changed = blkback_changed,
2647         .is_ready = blkfront_is_ready,
2648 };
2649
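/*
 * Module init: clamp the xen_blkif_max_segments, xen_blkif_max_ring_order
 * and xen_blkif_max_queues module parameters to sane values, then register
 * the XENVBD_MAJOR block major and the xenbus frontend driver.
 */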
2650 static int __init xlblk_init(void)
2651 {
2652         int ret;
2653         int nr_cpus = num_online_cpus();
2654
2655         if (!xen_domain())
2656                 return -ENODEV;
2657
2658         if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2659                 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2660
2661         if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
2662                 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2663                         xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
2664                 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
2665         }
2666
2667         if (xen_blkif_max_queues > nr_cpus) {
2668                 pr_info("Invalid max_queues (%d), will use default max: %d.\n",
2669                         xen_blkif_max_queues, nr_cpus);
2670                 xen_blkif_max_queues = nr_cpus;
2671         }
2672
2673         if (!xen_has_pv_disk_devices())
2674                 return -ENODEV;
2675
2676         if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2677                 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2678                        XENVBD_MAJOR, DEV_NAME);
2679                 return -ENODEV;
2680         }
2681
2682         ret = xenbus_register_frontend(&blkfront_driver);
2683         if (ret) {
2684                 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2685                 return ret;
2686         }
2687
2688         return 0;
2689 }
2690 module_init(xlblk_init);
2691
2692
2693 static void __exit xlblk_exit(void)
2694 {
2695         xenbus_unregister_driver(&blkfront_driver);
2696         unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2697         kfree(minors);
2698 }
2699 module_exit(xlblk_exit);
2700
2701 MODULE_DESCRIPTION("Xen virtual block device frontend");
2702 MODULE_LICENSE("GPL");
2703 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
2704 MODULE_ALIAS("xen:vbd");
2705 MODULE_ALIAS("xenblk");