[karo-tx-linux.git] / tools / kvm / virtio / blk.c
#include "kvm/virtio-blk.h"

#include "kvm/virtio-pci-dev.h"
#include "kvm/irq.h"
#include "kvm/disk-image.h"
#include "kvm/virtio.h"
#include "kvm/ioport.h"
#include "kvm/mutex.h"
#include "kvm/util.h"
#include "kvm/kvm.h"
#include "kvm/pci.h"
#include "kvm/threadpool.h"

#include <linux/virtio_ring.h>
#include <linux/virtio_blk.h>

#include <linux/types.h>
#include <pthread.h>

#define VIRTIO_BLK_MAX_DEV              4
#define NUM_VIRT_QUEUES                 1

#define VIRTIO_BLK_QUEUE_SIZE           128
/*
 * the request header and the status byte consume two entries
 */
#define DISK_SEG_MAX                    (VIRTIO_BLK_QUEUE_SIZE - 2)

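/*
 * Per-queue context handed to the thread pool: it ties a virt queue to
 * its owning device so the worker can process requests for that queue.
 */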
struct blk_dev_job {
        struct virt_queue               *vq;
        struct blk_dev                  *bdev;
        void                            *job_id;
};

struct blk_dev {
        pthread_mutex_t                 mutex;

        struct virtio_blk_config        blk_config;
        struct disk_image               *disk;
        u32                             host_features;
        u32                             guest_features;
        u16                             config_vector;
        u8                              status;
        u8                              isr;
        u8                              idx;

        /* virtio queue */
        u16                             queue_selector;

        struct virt_queue               vqs[NUM_VIRT_QUEUES];
        struct blk_dev_job              jobs[NUM_VIRT_QUEUES];
        struct pci_device_header        pci_hdr;
};

static struct blk_dev *bdevs[VIRTIO_BLK_MAX_DEV];

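/*
 * Read one byte of the device-specific configuration space
 * (struct virtio_blk_config).  With no MSI-X vectors exposed, the config
 * space begins right after the common register block, hence the
 * VIRTIO_MSI_CONFIG_VECTOR offset adjustment below.
 */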
static bool virtio_blk_dev_in(struct blk_dev *bdev, void *data, unsigned long offset, int size, u32 count)
{
        u8 *config_space = (u8 *) &bdev->blk_config;

        if (size != 1 || count != 1)
                return false;

        ioport__write8(data, config_space[offset - VIRTIO_MSI_CONFIG_VECTOR]);

        return true;
}

/* Translate port into device id + offset in that device addr space */
static void virtio_blk_port2dev(u16 port, u16 base, u16 size, u16 *dev_idx, u16 *offset)
{
        *dev_idx        = (port - base) / size;
        *offset         = port - (base + *dev_idx * size);
}

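/*
 * Guest reads from the device's legacy virtio PCI I/O window: the common
 * registers are served here, anything beyond them falls through to the
 * device-specific config space handler above.
 */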
static bool virtio_blk_pci_io_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
        struct blk_dev *bdev;
        u16 offset, dev_idx;
        bool ret = true;

        virtio_blk_port2dev(port, IOPORT_VIRTIO_BLK, IOPORT_VIRTIO_BLK_SIZE, &dev_idx, &offset);

        bdev = bdevs[dev_idx];

        mutex_lock(&bdev->mutex);

        switch (offset) {
        case VIRTIO_PCI_HOST_FEATURES:
                ioport__write32(data, bdev->host_features);
                break;
        case VIRTIO_PCI_GUEST_FEATURES:
                ret             = false;
                break;
        case VIRTIO_PCI_QUEUE_PFN:
                ioport__write32(data, bdev->vqs[bdev->queue_selector].pfn);
                break;
        case VIRTIO_PCI_QUEUE_NUM:
                ioport__write16(data, VIRTIO_BLK_QUEUE_SIZE);
                break;
        case VIRTIO_PCI_QUEUE_SEL:
        case VIRTIO_PCI_QUEUE_NOTIFY:
                ret             = false;
                break;
        case VIRTIO_PCI_STATUS:
                ioport__write8(data, bdev->status);
                break;
        case VIRTIO_PCI_ISR:
                ioport__write8(data, bdev->isr);
                kvm__irq_line(kvm, bdev->pci_hdr.irq_line, VIRTIO_IRQ_LOW);
                bdev->isr = VIRTIO_IRQ_LOW;
                break;
        case VIRTIO_MSI_CONFIG_VECTOR:
                ioport__write16(data, bdev->config_vector);
                break;
        default:
                ret = virtio_blk_dev_in(bdev, data, offset, size, count);
                break;
        }

        mutex_unlock(&bdev->mutex);

        return ret;
}

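/*
 * Process a single request pulled off the virt queue.  The descriptor
 * chain is laid out as: iov[0] carries the virtio_blk_outhdr, the middle
 * iovecs carry the data buffers, and the last iovec holds the one-byte
 * status written back to the guest.
 */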
static bool virtio_blk_do_io_request(struct kvm *kvm,
                                        struct blk_dev *bdev,
                                        struct virt_queue *queue)
{
        struct iovec iov[VIRTIO_BLK_QUEUE_SIZE];
        struct virtio_blk_outhdr *req;
        ssize_t block_cnt = -1;
        u16 out, in, head;
        u8 *status;

        head                    = virt_queue__get_iov(queue, iov, &out, &in, kvm);

        /* head */
        req                     = iov[0].iov_base;

        switch (req->type) {
        case VIRTIO_BLK_T_IN:
                block_cnt       = disk_image__read(bdev->disk, req->sector, iov + 1, in + out - 2);
                break;
        case VIRTIO_BLK_T_OUT:
                block_cnt       = disk_image__write(bdev->disk, req->sector, iov + 1, in + out - 2);
                break;
        case VIRTIO_BLK_T_FLUSH:
                block_cnt       = disk_image__flush(bdev->disk);
                break;
        default:
                pr_warning("request type %d", req->type);
                block_cnt       = -1;
                break;
        }

        /* status */
        status                  = iov[out + in - 1].iov_base;
        *status                 = (block_cnt < 0) ? VIRTIO_BLK_S_IOERR : VIRTIO_BLK_S_OK;

        virt_queue__set_used_elem(queue, head, block_cnt);

        return true;
}

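/*
 * Thread pool callback: drain every request currently available on the
 * queue, then raise the interrupt so the guest can reap the completions.
 */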
static void virtio_blk_do_io(struct kvm *kvm, void *param)
{
        struct blk_dev_job *job = param;
        struct virt_queue *vq;
        struct blk_dev *bdev;

        vq                      = job->vq;
        bdev                    = job->bdev;

        while (virt_queue__available(vq))
                virtio_blk_do_io_request(kvm, bdev, vq);

        virt_queue__trigger_irq(vq, bdev->pci_hdr.irq_line, &bdev->isr, kvm);
}

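/*
 * Guest writes to the legacy virtio PCI I/O window: QUEUE_PFN sets up the
 * vring and registers a thread pool job for the queue, QUEUE_NOTIFY kicks
 * that job, and the remaining registers update device state.
 */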
static bool virtio_blk_pci_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
        struct blk_dev *bdev;
        u16 offset, dev_idx;
        bool ret = true;

        virtio_blk_port2dev(port, IOPORT_VIRTIO_BLK, IOPORT_VIRTIO_BLK_SIZE, &dev_idx, &offset);

        bdev = bdevs[dev_idx];

        mutex_lock(&bdev->mutex);

        switch (offset) {
        case VIRTIO_PCI_GUEST_FEATURES:
                bdev->guest_features    = ioport__read32(data);
                break;
        case VIRTIO_PCI_QUEUE_PFN: {
                struct virt_queue *queue;
                struct blk_dev_job *job;
                void *p;

                job = &bdev->jobs[bdev->queue_selector];

                queue                   = &bdev->vqs[bdev->queue_selector];
                queue->pfn              = ioport__read32(data);
                p                       = guest_pfn_to_host(kvm, queue->pfn);

                vring_init(&queue->vring, VIRTIO_BLK_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

                *job                    = (struct blk_dev_job) {
                        .vq                     = queue,
                        .bdev                   = bdev,
                };

                job->job_id = thread_pool__add_job(kvm, virtio_blk_do_io, job);

                break;
        }
        case VIRTIO_PCI_QUEUE_SEL:
                bdev->queue_selector    = ioport__read16(data);
                break;
        case VIRTIO_PCI_QUEUE_NOTIFY: {
                u16 queue_index;

                queue_index             = ioport__read16(data);
                thread_pool__do_job(bdev->jobs[queue_index].job_id);

                break;
        }
        case VIRTIO_PCI_STATUS:
                bdev->status            = ioport__read8(data);
                break;
        case VIRTIO_MSI_CONFIG_VECTOR:
                bdev->config_vector     = VIRTIO_MSI_NO_VECTOR;
                break;
        case VIRTIO_MSI_QUEUE_VECTOR:
                break;
        default:
                ret                     = false;
                break;
        }

        mutex_unlock(&bdev->mutex);

        return ret;
}

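/* Legacy PIO callbacks hooked into the ioport layer for each device's window. */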
static struct ioport_operations virtio_blk_io_ops = {
        .io_in          = virtio_blk_pci_io_in,
        .io_out         = virtio_blk_pci_io_out,
};

static int virtio_blk_find_empty_dev(void)
{
        int i;

        for (i = 0; i < VIRTIO_BLK_MAX_DEV; i++) {
                if (bdevs[i] == NULL)
                        return i;
        }

        return -1;
}

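/*
 * Bring up one virtio-blk PCI device for the given disk image: allocate a
 * slot, fill in the virtio and PCI configuration, then register the
 * interrupt line, the PCI device and its I/O port window.
 */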
void virtio_blk__init(struct kvm *kvm, struct disk_image *disk)
{
        u16 blk_dev_base_addr;
        u8 dev, pin, line;
        struct blk_dev *bdev;
        int new_dev_idx;

        if (!disk)
                return;

        new_dev_idx             = virtio_blk_find_empty_dev();
        if (new_dev_idx < 0)
                die("Could not find an empty block device slot");

        bdevs[new_dev_idx]      = calloc(1, sizeof(struct blk_dev));
        if (bdevs[new_dev_idx] == NULL)
                die("Failed allocating bdev");

        bdev                    = bdevs[new_dev_idx];

        blk_dev_base_addr       = IOPORT_VIRTIO_BLK + new_dev_idx * IOPORT_VIRTIO_BLK_SIZE;

        *bdev                   = (struct blk_dev) {
                .mutex                          = PTHREAD_MUTEX_INITIALIZER,
                .disk                           = disk,
                .idx                            = new_dev_idx,
                .blk_config                     = (struct virtio_blk_config) {
                        .capacity               = disk->size / SECTOR_SIZE,
                        .seg_max                = DISK_SEG_MAX,
                },
                .pci_hdr = (struct pci_device_header) {
                        .vendor_id              = PCI_VENDOR_ID_REDHAT_QUMRANET,
                        .device_id              = PCI_DEVICE_ID_VIRTIO_BLK,
                        .header_type            = PCI_HEADER_TYPE_NORMAL,
                        .revision_id            = 0,
                        .class                  = 0x010000,
                        .subsys_vendor_id       = PCI_SUBSYSTEM_VENDOR_ID_REDHAT_QUMRANET,
                        .subsys_id              = VIRTIO_ID_BLOCK,
                        .bar[0]                 = blk_dev_base_addr | PCI_BASE_ADDRESS_SPACE_IO,
                },
                /*
                 * Note we don't set VIRTIO_BLK_F_GEOMETRY here, so the
                 * guest kernel will compute the disk geometry on its own;
                 * the same applies to VIRTIO_BLK_F_BLK_SIZE.
                 */
                .host_features                  = (1UL << VIRTIO_BLK_F_SEG_MAX | 1UL << VIRTIO_BLK_F_FLUSH),
        };

        if (irq__register_device(VIRTIO_ID_BLOCK, &dev, &pin, &line) < 0)
                return;

        bdev->pci_hdr.irq_pin   = pin;
        bdev->pci_hdr.irq_line  = line;

        pci__register(&bdev->pci_hdr, dev);

        ioport__register(blk_dev_base_addr, &virtio_blk_io_ops, IOPORT_VIRTIO_BLK_SIZE);
}