/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
	/* Shadow of the avail ring's head entries, so we can skip
	 * rewriting an entry that already holds the right head
	 * (inferred from its use in virtqueue_add() below). */
	u16 *avail;

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1);
	if (vq->avail[avail] != head) {
		vq->avail[avail] = head;
		vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
	}

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
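
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * submitting a request with a readable header and a writable status byte
 * might build the sgs[] array like this. The "req" structure and its
 * fields are hypothetical.
 *
 *	struct scatterlist hdr, status, *sgs[2] = { &hdr, &status };
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */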
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
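
/*
 * Example (illustrative, not from the original file): a receive path
 * typically posts empty buffers for the other side to fill. "buf" and
 * its size are hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, PAGE_SIZE);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 */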
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added;
	new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx);
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
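
/*
 * Example (sketch, not in the original): the split form lets a driver
 * drop its lock before the possibly slow notification. "lock" is a
 * hypothetical driver spinlock.
 *
 *	spin_lock_irqsave(&lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */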
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) {
		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
		virtio_mb(vq->weak_barriers);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
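
/*
 * Example (sketch, not part of the original): a typical virtqueue
 * callback drains all used buffers in a loop. "process_response" is a
 * hypothetical driver function.
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *buf;
 *
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			process_response(buf, len);
 *	}
 */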
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
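
/*
 * Example (sketch, not in the original): prepare/poll lets a driver
 * re-enable callbacks and then detect a racing completion without
 * blocking.
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		// A buffer snuck in; suppress again and process it.
 *		virtqueue_disable_cb(vq);
 *	}
 */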
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
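
/*
 * Example (sketch, not part of the original): the canonical interrupt
 * mitigation loop pairs disable_cb/enable_cb and rechecks to close the
 * race window. "consume" is a hypothetical driver function.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			consume(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */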
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT);
	/* TODO: tune this threshold */
	bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
	virtio_mb(vq->weak_barriers);
	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
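
/*
 * Example (sketch, not in the original): transmit paths often prefer
 * the delayed form so they take one interrupt per batch rather than
 * per buffer. "free_old_xmit" is hypothetical.
 *
 *	free_old_xmit(vq);
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		free_old_xmit(vq);	// more completions raced in
 */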
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
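
/*
 * Example (sketch, not part of the original): during device removal a
 * driver reclaims buffers it queued but the device never used.
 * "free_buf" is hypothetical.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buf(buf);
 */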
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->avail = kzalloc(sizeof(*vq->avail) * num, GFP_KERNEL);
	if (!vq->avail) {
		kfree(vq);
		return NULL;
	}

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT);

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
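
/*
 * Example (sketch, not in the original): a transport allocates ring
 * pages and wraps them in a virtqueue. The notify/callback hooks and
 * "num"/"align" values are hypothetical.
 *
 *	pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *					 get_order(vring_size(num, align)));
 *	vq = vring_new_virtqueue(0, num, align, vdev, true, pages,
 *				 my_notify, my_callback, "requests");
 */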
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq)->avail);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");