/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * The maximum number of segments allowed in an indirect request. This
 * value is also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages, while the guest may use a different page size
 * (4K or 64K). XEN_PAGES_PER_SEGMENT is the number of Xen pages
 * covered by one segment (one guest page).
 */
#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
        (XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME \
        (XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
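
/*
 * Worked example (a sketch, assuming sizeof(struct blkif_request_segment)
 * is 8 bytes on the platform in question):
 *
 *   4K guest pages (x86):
 *     XEN_PAGES_PER_SEGMENT        = 4096 / 4096  = 1
 *     XEN_PAGES_PER_INDIRECT_FRAME = 4096 / 8     = 512
 *     SEGS_PER_INDIRECT_FRAME      = 512 / 1      = 512
 *     MAX_INDIRECT_PAGES           = ceil(256/512) = 1
 *
 *   64K guest pages (e.g. arm64):
 *     XEN_PAGES_PER_SEGMENT        = 65536 / 4096 = 16
 *     SEGS_PER_INDIRECT_FRAME      = 512 / 16     = 32
 *     MAX_INDIRECT_PAGES           = ceil(256/32)  = 8
 */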

/* Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.  */
struct blkif_common_request {
        char dummy;
};
struct blkif_common_response {
        char dummy;
};

struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad1;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
                struct blkif_x86_32_request_other other;
                struct blkif_x86_32_request_indirect indirect;
        } u;
} __attribute__((__packed__));
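
/*
 * Layout note (informational): because these structs are __packed__,
 * the 32-bit ABI places u.rw.id at byte offset 4 (1 byte operation +
 * 1 byte nr_segments + 2 byte handle), while the 64-bit ABI pads it
 * out to offset 8. That is why the x86_64 variants below carry
 * explicit _pad fields, and why the blkif_get_x86_32_req() /
 * blkif_get_x86_64_req() helpers at the end of this file translate
 * both layouts into the native struct blkif_request.
 */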

/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
        uint64_t        id;              /* copied from request */
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
                struct blkif_x86_64_request_other other;
                struct blkif_x86_64_request_indirect indirect;
        } u;
} __attribute__((__packed__));

struct blkif_x86_64_response {
        uint64_t       __attribute__((__aligned__(8))) id;
        uint8_t         operation;       /* copied from request */
        int16_t         status;          /* BLKIF_RSP_???       */
};

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_x86_64_response);

union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};
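
/*
 * Illustrative dispatch (a sketch; the real consumer loop lives in
 * blkback.c): the backend pulls requests from whichever ring flavor
 * matches the negotiated protocol and converts them to the native
 * layout with the helpers at the bottom of this file, e.g.
 *
 *   switch (blkif->blk_protocol) {
 *   case BLKIF_PROTOCOL_NATIVE:
 *           memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *                  sizeof(req));
 *           break;
 *   case BLKIF_PROTOCOL_X86_32:
 *           blkif_get_x86_32_req(&req,
 *                   RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *           break;
 *   case BLKIF_PROTOCOL_X86_64:
 *           blkif_get_x86_64_req(&req,
 *                   RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *           break;
 *   }
 */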

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif

struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* phys device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        unsigned int            flush_support:1;
        unsigned int            discard_secure:1;
        unsigned int            feature_gnt_persistent:1;
        unsigned int            overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE       2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE           0
/*
 * This persistent grant has been used recently. The flag is set when
 * PERSISTENT_GNT_ACTIVE is cleared, so the LRU purge logic can tell
 * that the grant was in use since the last scan.
 */
#define PERSISTENT_GNT_WAS_ACTIVE       1
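
/*
 * Illustrative lifecycle (a sketch; the real accounting lives in
 * blkback.c): a grant is marked active while mapped into a request,
 * and remembered as recently used when the request completes, e.g.
 *
 *   set_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);      // grant handed out
 *   ...
 *   clear_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);    // request finished
 *   set_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags);  // recently used
 *
 * The LRU purge worker can then skip grants with WAS_ACTIVE set and
 * reclaim only those that have stayed idle across a full interval.
 */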

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE         32

struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
        DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
        struct rb_node node;
        struct list_head remove_node;
};

/* Per-ring information. */
struct xen_blkif_ring {
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        union blkif_back_rings  blk_rings;
        void                    *blk_ring;
        /* Private fields. */
        spinlock_t              blk_ring_lock;

        wait_queue_head_t       wq;
        atomic_t                inflight;
        bool                    active;
        /* One thread per blkif ring. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* List of all available 'pending_req's */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;

        /* Tree to store persistent grants. */
        spinlock_t              pers_gnts_lock;
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
        unsigned long           next_lru;

        /* Statistics. */
        unsigned long           st_print;
        unsigned long long      st_rd_req;
        unsigned long long      st_wr_req;
        unsigned long long      st_oo_req;
        unsigned long long      st_f_req;
        unsigned long long      st_ds_req;
        unsigned long long      st_rd_sect;
        unsigned long long      st_wr_sect;

        /* Used by the kworker that offloads work from the persistent-grant purge. */
        struct list_head        persistent_purge_list;
        struct work_struct      persistent_purge_work;

        /* Buffer of free pages to map grant refs. */
        spinlock_t              free_pages_lock;
        int                     free_pages_num;
        struct list_head        free_pages;

        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
        struct xen_blkif        *blkif;
};

struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        atomic_t                refcnt;
        /* for barrier (drain) requests */
        struct completion       drain_complete;
        atomic_t                drain;

        struct work_struct      free_work;
        unsigned int            nr_ring_pages;
        /* All rings for this device. */
        struct xen_blkif_ring   *rings;
        unsigned int            nr_rings;
};

struct seg_buf {
        unsigned long offset;
        unsigned int nsec;
};

struct grant_page {
        struct page             *page;
        struct persistent_gnt   *persistent_gnt;
        grant_handle_t          handle;
        grant_ref_t             gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers
 * has a 'pending_req' allocated to it. Each bio that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif_ring   *ring;
        u64                     id;
        int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
        struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
        /* Indirect descriptors */
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
        struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
        struct page                   *unmap_pages[MAX_INDIRECT_SEGMENTS];
        struct gntab_unmap_queue_data gnttab_unmap_data;
};
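
/*
 * Completion sketch (illustrative; the real flow is in blkback.c):
 * pendcnt is primed with the number of bios submitted for the request,
 * and each bio completion decrements it; the response goes out only
 * when the last bio finishes, e.g.
 *
 *   atomic_set(&pending_req->pendcnt, nbio);
 *   ...
 *   // in each bio's end_io path:
 *   if (atomic_dec_and_test(&pending_req->pendcnt))
 *           make_response(...);   // queue response, recycle pending_req
 */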


#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                          get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                               \
        do {                                            \
                if (atomic_dec_and_test(&(_b)->refcnt)) \
                        schedule_work(&(_b)->free_work);\
        } while (0)
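
/*
 * Usage sketch (illustrative): take a reference before the blkif can be
 * reached from another context, and drop it when done; the final put
 * defers the actual teardown to free_work so it runs in process context
 * rather than under a lock or in interrupt context.
 *
 *   xen_blkif_get(blkif);      // e.g. before starting a per-ring thread
 *   ...
 *   xen_blkif_put(blkif);      // last put schedules free_work
 */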

struct phys_req {
        unsigned short          dev;
        blkif_sector_t          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = READ_ONCE(src->operation);
        switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                /*
                 * Make sure the bounds check below uses our private copy
                 * of nr_segments, not a fresh read from the shared ring,
                 * which the frontend could change under us.
                 */
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                /* Clamp the number of indirect pages we copy, too. */
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = READ_ONCE(src->operation);
        switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                /* See blkif_get_x86_32_req(): use our private copy of
                 * nr_segments for the bounds check. */
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */