 * Virtio SCSI HBA driver
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 * Paolo Bonzini <pbonzini@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
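/*
 * Virtqueue layout used by this driver (set up in virtscsi_init below):
 * vq 0 is the control queue, vq 1 is the event queue, and the request
 * queues start at VIRTIO_SCSI_VQ_BASE; virtscsi_req_done subtracts this
 * constant from vq->index to find the per-queue state.
 */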
/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;
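/*
 * For regular I/O these command structures live in the per-command private
 * area reserved via the host template's .cmd_size (obtained below with
 * scsi_cmd_priv); task management requests allocate one from
 * virtscsi_cmd_pool instead.
 */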
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	spinlock_t vq_lock;
	struct virtqueue *vq;
};
/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy.  When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept.  However, if a target was idle, we can
 * choose a queue arbitrarily.  In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs.  This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * tgt_lock is held to serialize reading and writing req_vq.  Reading req_vq
 * could be done locklessly, but we do not do it yet.
 *
 * Decrements of reqs are never concurrent with writes of req_vq: before the
 * decrement reqs will be != 0; after the decrement the virtqueue completion
 * routine will not use the req_vq so it can be changed by a new request.
 * Thus they can happen outside the tgt_lock, provided of course we make reqs
 * an atomic_t.
 */
struct virtio_scsi_target_state {
	/* This spinlock is never held at the same time as vq_lock. */
	spinlock_t tgt_lock;

	/* Count of outstanding requests. */
	atomic_t reqs;

	/* Currently active virtqueue for requests sent to this target. */
	struct virtio_scsi_vq *req_vq;
};
/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	/* If the affinity hint is set for virtqueues */
	bool affinity_hint_set;

	/* CPU hotplug notifier */
	struct notifier_block nb;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);

	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
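	/*
	 * Worked example for the bidirectional case above (hypothetical
	 * numbers): with resid = 100 and a 60-byte data-in buffer, the
	 * data-in residual becomes min(100, 60) = 60 and the remaining 40
	 * is reported as the data-out residual.
	 */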
/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, resp->resid);
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
	WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
		set_driver_byte(sc, DRIVER_SENSE);

	atomic_dec(&tgt->reqs);
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)

		if (unlikely(virtqueue_is_broken(vq)))
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
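/*
 * The do/while above re-drains the queue whenever virtqueue_enable_cb()
 * reports that more buffers completed while callbacks were disabled, so no
 * completion is missed.  virtscsi_req_done below maps the interrupting
 * virtqueue back to its per-queue state via vq->index.
 */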
static void virtscsi_req_done(struct virtqueue *vq)
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
	struct virtio_scsi_cmd *cmd = buf;

	complete_all(cmd->comp);

static void virtscsi_ctrl_done(struct virtqueue *vq)
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
	struct scatterlist sg;

	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
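	/*
	 * The event carries an 8-byte LUN field in the same single-level
	 * format this driver generates elsewhere: byte 1 is the target and
	 * bytes 2-3 encode the LUN; events are always reported on channel 0.
	 */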
	switch (event->reason) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = event->reason & 255;
	u8 ascq = event->reason >> 8;
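	/*
	 * The reason field packs the additional sense code in its low byte
	 * and the qualifier in the next byte; e.g. a hypothetical "capacity
	 * data has changed" event (ASC 0x2a, ASCQ 0x09) arrives as
	 * reason == 0x092a.
	 */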
	sdev = scsi_device_lookup(shost, 0, target, lun);
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed". */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
static void virtscsi_handle_event(struct work_struct *work)
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
		event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));

	switch (event->event) {
	case VIRTIO_SCSI_T_NO_EVENT:
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	virtscsi_kick_event(vscsi, event_node);

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
	struct virtio_scsi_event_node *event_node = buf;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	schedule_work(&event_node->work);

static void virtscsi_event_done(struct virtqueue *vq)
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);

/*
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq : the struct virtqueue we're talking about
 * @cmd : command structure
 * @req_size : size of the request buffer
 * @resp_size : size of the response buffer
 * @gfp : flags to use for memory allocations
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size, gfp_t gfp)
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[4], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
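	/*
	 * sgs[] is filled below in the order the virtio-scsi request format
	 * expects: request header, data-out payload (if any), response
	 * header, data-in payload (if any).  For a read, for example, only
	 * the two headers and the data-in table are added.
	 */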
	/* Request header. */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer. */
		sgs[out_num++] = out->sgl;

	/* Response header. */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

		sgs[out_num + in_num++] = in->sgl;

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size, gfp_t gfp)
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
		needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

		virtqueue_notify(vq->vq);
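	/*
	 * virtqueue_kick_prepare() runs under vq_lock, but the notification
	 * itself (which may exit to the host) is issued only after the lock
	 * has been dropped, keeping the lock hold time short.
	 */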
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
				 struct virtio_scsi_vq *req_vq,
				 struct scsi_cmnd *sc)
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported? */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	memset(cmd, 0, sizeof(*cmd));

	cmd->req.cmd = (struct virtio_scsi_cmd_req){
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,
		.task_attr = VIRTIO_SCSI_S_SIMPLE,

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
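	/*
	 * Illustration (hypothetical target 3, LUN 5): the header above
	 * carries lun[] = { 1, 3, 0x40, 5, 0, ... }, i.e. single-level LUN
	 * addressing with the target in byte 1 and the LUN in bytes 2-3.
	 * The tag is the scsi_cmnd pointer, which virtscsi_abort() later
	 * reuses to name the task to abort.
	 */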
	if (virtscsi_kick_cmd(req_vq, cmd,
			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
		return SCSI_MLQUEUE_HOST_BUSY;

static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	atomic_inc(&tgt->reqs);
	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);

static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
					       struct virtio_scsi_target_state *tgt)
	struct virtio_scsi_vq *vq;

	spin_lock_irqsave(&tgt->tgt_lock, flags);

	if (atomic_inc_return(&tgt->reqs) > 1)
		queue_num = smp_processor_id();
		while (unlikely(queue_num >= vscsi->num_queues))
			queue_num -= vscsi->num_queues;
		tgt->req_vq = vq = &vscsi->req_vqs[queue_num];

	spin_unlock_irqrestore(&tgt->tgt_lock, flags);
static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
				       struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);

	return virtscsi_queuecommand(vscsi, req_vq, sc);

static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
	DECLARE_COMPLETION_ONSTACK(comp);

	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)

	mempool_free(cmd, virtscsi_cmd_pool);
static int virtscsi_device_reset(struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,

	return virtscsi_tmf(vscsi, cmd);
static int virtscsi_abort(struct scsi_cmnd *sc)
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = (unsigned long)sc,

	return virtscsi_tmf(vscsi, cmd);
static int virtscsi_target_alloc(struct scsi_target *starget)
	struct virtio_scsi_target_state *tgt =
				kmalloc(sizeof(*tgt), GFP_KERNEL);

	spin_lock_init(&tgt->tgt_lock);
	atomic_set(&tgt->reqs, 0);

	starget->hostdata = tgt;

static void virtscsi_target_destroy(struct scsi_target *starget)
	struct virtio_scsi_target_state *tgt = starget->hostdata;
static struct scsi_host_template virtscsi_host_template_single = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand_single,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
static struct scsi_host_template virtscsi_host_template_multi = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand_multi,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,

	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
#define virtscsi_config_get(vdev, fld) \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \

#define virtscsi_config_set(vdev, fld, val) \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
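/*
 * Typical usage, as seen in virtscsi_probe() and virtscsi_init() below:
 *
 *   num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
 *   virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
 */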
static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
	/* In multiqueue mode, when the number of CPUs is equal
	 * to the number of request queues, we make each queue
	 * private to one CPU by setting the affinity hint,
	 * to eliminate contention.
	 */
	if ((vscsi->num_queues == 1 ||
	     vscsi->num_queues != num_online_cpus()) && affinity) {
		if (vscsi->affinity_hint_set)

		for_each_online_cpu(cpu) {
			virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);

		vscsi->affinity_hint_set = true;

		for (i = 0; i < vscsi->num_queues; i++) {
			if (!vscsi->req_vqs[i].vq)

			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);

		vscsi->affinity_hint_set = false;
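	/*
	 * Example of the policy above (hypothetical): with 4 request queues
	 * and 4 online CPUs, req_vqs[0..3] get their IRQ affinity hints set
	 * to CPUs 0..3; for any other combination the hints are cleared
	 * (or simply never set).
	 */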
static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
	__virtscsi_set_affinity(vscsi, affinity);

static int virtscsi_cpu_callback(struct notifier_block *nfb,
				 unsigned long action, void *hcpu)
	struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);

	case CPU_ONLINE_FROZEN:
	case CPU_DEAD_FROZEN:
		__virtscsi_set_affinity(vscsi, true);

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
static void virtscsi_scan(struct virtio_device *vdev)
	struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;

	scsi_scan_host(shost);

static void virtscsi_remove_vqs(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_set_affinity(vscsi, false);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	vdev->config->del_vqs(vdev);
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
	vq_callback_t **callbacks;
	struct virtqueue **vqs;

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
	names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],

	virtscsi_set_affinity(vscsi, true);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

		virtscsi_remove_vqs(vdev);
static int virtscsi_probe(struct virtio_device *vdev)
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	u32 sg_elems, num_targets;
	struct scsi_host_template *hostt;
	/* We need to know how many queues there are before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

		hostt = &virtscsi_host_template_single;
		hostt = &virtscsi_host_template_multi;

	shost = scsi_host_alloc(hostt,
		sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
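	/*
	 * The host private data is sized to hold struct virtio_scsi plus its
	 * trailing req_vqs[] flexible array, one virtio_scsi_vq per request
	 * queue.
	 */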
	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->num_queues = num_queues;

	err = virtscsi_init(vdev, vscsi);
		goto virtscsi_init_failed;

	vscsi->nb.notifier_call = &virtscsi_cpu_callback;
	err = register_hotcpu_notifier(&vscsi->nb);
		pr_err("registering cpu notifier failed\n");
		goto scsi_add_host_failed;

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
	/* LUNs > 256 are reported with format 1, so they go in the
	 * range 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
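	/*
	 * Example (hypothetical): a device-reported max_lun of 255 gives
	 * shost->max_lun = 255 + 1 + 0x4000 = 16640, covering the
	 * flat-format LUN numbers described in the comment above.
	 */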
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	err = scsi_add_host(shost, &vdev->dev);
		goto scsi_add_host_failed;
	/*
	 * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
	 * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
	 */
scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);

static void virtscsi_remove(struct virtio_device *vdev)
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);

	unregister_hotcpu_notifier(&vscsi->nb);

	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	unregister_hotcpu_notifier(&vscsi->nb);
	virtscsi_remove_vqs(vdev);

static int virtscsi_restore(struct virtio_device *vdev)
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	err = virtscsi_init(vdev, vscsi);

	err = register_hotcpu_notifier(&vscsi->nb);
		vdev->config->del_vqs(vdev);
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
	.scan = virtscsi_scan,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
	.remove = virtscsi_remove,
static int __init init(void)
	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");

		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");

	ret = register_virtio_driver(&virtio_scsi_driver);

	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;

static void __exit fini(void)
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");