} while (0)
#endif
+ /* TUN device flags */
+
+ /* IFF_ATTACH_QUEUE is never stored in device flags,
+ * so we overload it to mean fasync when it is stored there.
+ */
+ #define TUN_FASYNC IFF_ATTACH_QUEUE
+
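+ /* TUN_FEATURES is the set of IFF_* feature bits that userspace may
+ * toggle through TUNSETIFF; tun->flags now stores the uapi IFF_*
+ * values directly, so these bits copy in and out without translation.
+ */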
+ #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
+ IFF_VNET_LE | IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
u32 flow_count;
};
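+
+ /* Virtio 1.0 framing makes the vnet header fields explicitly
+ * little-endian (__virtio16); legacy virtio used host byte order.
+ * The helpers below pick the right conversion from IFF_VNET_LE,
+ * e.g. reading a header field:
+ *
+ *	u16 hdr_len = tun16_to_cpu(tun, gso.hdr_len);
+ */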
+ static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
+ {
+ return __virtio16_to_cpu(tun->flags & IFF_VNET_LE, val);
+ }
+
+ static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
+ {
+ return __cpu_to_virtio16(tun->flags & IFF_VNET_LE, val);
+ }
+
static inline u32 tun_hashfn(u32 rxhash)
{
return rxhash & 0x3ff;
if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
netif_carrier_off(tun->dev);
- if (!(tun->flags & TUN_PERSIST) &&
+ if (!(tun->flags & IFF_PERSIST) &&
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
}
BUG_ON(tun->numdisabled != 0);
- if (tun->flags & TUN_PERSIST)
+ if (tun->flags & IFF_PERSIST)
module_put(THIS_MODULE);
}
goto out;
err = -EBUSY;
- if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+ if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
goto out;
err = -E2BIG;
struct tun_struct *tun = netdev_priv(dev);
switch (tun->flags & TUN_TYPE_MASK) {
- case TUN_TUN_DEV:
+ case IFF_TUN:
dev->netdev_ops = &tun_netdev_ops;
/* Point-to-Point TUN Device */
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
- case TUN_TAP_DEV:
+ case IFF_TAP:
dev->netdev_ops = &tap_netdev_ops;
/* Ethernet TAP Device */
ether_setup(dev);
int err;
u32 rxhash;
- if (!(tun->flags & TUN_NO_PI)) {
+ if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
len -= sizeof(pi);
offset += sizeof(pi);
}
- if (tun->flags & TUN_VNET_HDR) {
+ if (tun->flags & IFF_VNET_HDR) {
if (len < tun->vnet_hdr_sz)
return -EINVAL;
len -= tun->vnet_hdr_sz;
return -EFAULT;
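+ /* The vnet header fields are __virtio16 now, so the sanity checks
+ * below convert each field to CPU order before doing arithmetic on
+ * it, and convert the result back before storing it in the header.
+ */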
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
- gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
+ tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
+ gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
- if (gso.hdr_len > len)
+ if (tun16_to_cpu(tun, gso.hdr_len) > len)
return -EINVAL;
offset += tun->vnet_hdr_sz;
}
- if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
+ if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
align += NET_IP_ALIGN;
if (unlikely(len < ETH_HLEN ||
- (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
+ (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
return -EINVAL;
}
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+ copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
linear = copylen;
if (!zerocopy) {
copylen = len;
- if (gso.hdr_len > good_linear)
+ if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
linear = good_linear;
else
- linear = gso.hdr_len;
+ linear = tun16_to_cpu(tun, gso.hdr_len);
}
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
}
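+ /* csum_start and csum_offset arrive in vnet-header endianness;
+ * convert them before handing them to skb_partial_csum_set().
+ */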
if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, gso.csum_start,
- gso.csum_offset)) {
+ if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
+ tun16_to_cpu(tun, gso.csum_offset))) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
return -EINVAL;
}
switch (tun->flags & TUN_TYPE_MASK) {
- case TUN_TUN_DEV:
- if (tun->flags & TUN_NO_PI) {
+ case IFF_TUN:
+ if (tun->flags & IFF_NO_PI) {
switch (skb->data[0] & 0xf0) {
case 0x40:
pi.proto = htons(ETH_P_IP);
skb->protocol = pi.proto;
skb->dev = tun->dev;
break;
- case TUN_TAP_DEV:
+ case IFF_TAP:
skb->protocol = eth_type_trans(skb, tun->dev);
break;
}
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb_shinfo(skb)->gso_size = gso.gso_size;
+ skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
if (skb_shinfo(skb)->gso_size == 0) {
tun->dev->stats.rx_frame_errors++;
kfree_skb(skb);
if (vlan_tx_tag_present(skb))
vlan_hlen = VLAN_HLEN;
- if (tun->flags & TUN_VNET_HDR)
+ if (tun->flags & IFF_VNET_HDR)
vnet_hdr_sz = tun->vnet_hdr_sz;
- if (!(tun->flags & TUN_NO_PI)) {
+ if (!(tun->flags & IFF_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
return -EINVAL;
struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */
- gso.hdr_len = skb_headlen(skb);
- gso.gso_size = sinfo->gso_size;
+ gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
+ gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
if (sinfo->gso_type & SKB_GSO_TCPV4)
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
else {
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, gso.gso_size,
- gso.hdr_len);
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
- min((int)gso.hdr_len, 64), true);
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
WARN_ON_ONCE(1);
return -EINVAL;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = skb_checksum_start_offset(skb) +
- vlan_hlen;
- gso.csum_offset = skb->csum_offset;
+ gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
+ vlan_hlen);
+ gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
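+ /* With tun->flags holding the uapi IFF_* bits directly, reporting
+ * them back to userspace reduces to a single mask; the hand-rolled
+ * per-flag translation goes away.
+ */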
static int tun_flags(struct tun_struct *tun)
{
- int flags = 0;
-
- if (tun->flags & TUN_TUN_DEV)
- flags |= IFF_TUN;
- else
- flags |= IFF_TAP;
-
- if (tun->flags & TUN_NO_PI)
- flags |= IFF_NO_PI;
-
- /* This flag has no real effect. We track the value for backwards
- * compatibility.
- */
- if (tun->flags & TUN_ONE_QUEUE)
- flags |= IFF_ONE_QUEUE;
-
- if (tun->flags & TUN_VNET_HDR)
- flags |= IFF_VNET_HDR;
-
- if (tun->flags & TUN_TAP_MQ)
- flags |= IFF_MULTI_QUEUE;
-
- if (tun->flags & TUN_PERSIST)
- flags |= IFF_PERSIST;
-
- return flags;
+ return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
return -EINVAL;
if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
- !!(tun->flags & TUN_TAP_MQ))
+ !!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
if (tun_not_capable(tun))
if (err < 0)
return err;
- if (tun->flags & TUN_TAP_MQ &&
+ if (tun->flags & IFF_MULTI_QUEUE &&
(tun->numqueues + tun->numdisabled > 1)) {
/* One or more queues have already been attached, no need
* to initialize the device again.
/* Set dev type */
if (ifr->ifr_flags & IFF_TUN) {
/* TUN device */
- flags |= TUN_TUN_DEV;
+ flags |= IFF_TUN;
name = "tun%d";
} else if (ifr->ifr_flags & IFF_TAP) {
/* TAP device */
- flags |= TUN_TAP_DEV;
+ flags |= IFF_TAP;
name = "tap%d";
} else
return -EINVAL;
tun_debug(KERN_INFO, tun, "tun_set_iff\n");
- if (ifr->ifr_flags & IFF_NO_PI)
- tun->flags |= TUN_NO_PI;
- else
- tun->flags &= ~TUN_NO_PI;
-
- /* This flag has no real effect. We track the value for backwards
- * compatibility.
- */
- if (ifr->ifr_flags & IFF_ONE_QUEUE)
- tun->flags |= TUN_ONE_QUEUE;
- else
- tun->flags &= ~TUN_ONE_QUEUE;
-
- if (ifr->ifr_flags & IFF_VNET_HDR)
- tun->flags |= TUN_VNET_HDR;
- else
- tun->flags &= ~TUN_VNET_HDR;
-
- if (ifr->ifr_flags & IFF_MULTI_QUEUE)
- tun->flags |= TUN_TAP_MQ;
- else
- tun->flags &= ~TUN_TAP_MQ;
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
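+ /* This single copy replaces four separate if/else blocks; it also
+ * keeps IFF_ONE_QUEUE, which is tracked only for backwards
+ * compatibility.
+ */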
/* Make sure persistent devices do not get stuck in
* xoff state.
ret = tun_attach(tun, file, false);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
- if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
+ if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
ret = -EINVAL;
else
__tun_detach(tfile, false);
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
- * TUNSETIFF. */
- return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
- IFF_VNET_HDR | IFF_MULTI_QUEUE,
+ * TUNSETIFF.
+ */
+ return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
(unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE)
return tun_set_queue(file, &ifr);
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module from being unloaded.
*/
- if (arg && !(tun->flags & TUN_PERSIST)) {
- tun->flags |= TUN_PERSIST;
+ if (arg && !(tun->flags & IFF_PERSIST)) {
+ tun->flags |= IFF_PERSIST;
__module_get(THIS_MODULE);
}
- if (!arg && (tun->flags & TUN_PERSIST)) {
- tun->flags &= ~TUN_PERSIST;
+ if (!arg && (tun->flags & IFF_PERSIST)) {
+ tun->flags &= ~IFF_PERSIST;
module_put(THIS_MODULE);
}
case TUNSETTXFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = update_filter(&tun->txflt, (void __user *)arg);
break;
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
case TUNDETACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = 0;
tun_detach_filter(tun, tun->numqueues);
case TUNGETFILTER:
ret = -EINVAL;
- if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
}
#ifdef CONFIG_PROC_FS
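+ /* seq_printf() no longer returns a value, so the fdinfo handler
+ * becomes void; there is nothing meaningful to propagate.
+ */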
-static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
{
struct tun_struct *tun;
struct ifreq ifr;
if (tun)
tun_put(tun);
- return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
+ seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
- case TUN_TUN_DEV:
+ case IFF_TUN:
strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
- case TUN_TAP_DEV:
+ case IFF_TAP:
strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
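--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
+ /* Fields shared with the device are __virtio32/__virtio64 and must
+ * pass through virtio32_to_cpu()/cpu_to_virtio32() and friends:
+ * little-endian for virtio 1.0 devices, host-native for legacy ones.
+ */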
sc, resp->response, resp->status, resp->sense_len);
sc->result = resp->status;
- virtscsi_compute_resid(sc, resp->resid);
+ virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
switch (resp->response) {
case VIRTIO_SCSI_S_OK:
set_host_byte(sc, DID_OK);
break;
}
- WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
+ WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
+ VIRTIO_SCSI_SENSE_SIZE);
if (sc->sense_buffer) {
memcpy(sc->sense_buffer, resp->sense,
- min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
+ min_t(u32,
+ virtio32_to_cpu(vscsi->vdev, resp->sense_len),
+ VIRTIO_SCSI_SENSE_SIZE));
if (resp->sense_len)
set_driver_byte(sc, DRIVER_SENSE);
}
unsigned int target = event->lun[1];
unsigned int lun = (event->lun[2] << 8) | event->lun[3];
- switch (event->reason) {
+ switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
scsi_add_device(shost, 0, target, lun);
break;
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
unsigned int target = event->lun[1];
unsigned int lun = (event->lun[2] << 8) | event->lun[3];
- u8 asc = event->reason & 255;
- u8 ascq = event->reason >> 8;
+ u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
+ u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
sdev = scsi_device_lookup(shost, 0, target, lun);
if (!sdev) {
struct virtio_scsi *vscsi = event_node->vscsi;
struct virtio_scsi_event *event = &event_node->event;
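+ /* event->event is __virtio32: test and clear EVENTS_MISSED with a
+ * mask converted to device endianness so the flag can be cleared in
+ * place, and convert the value itself before switching on it.
+ */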
- if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
- event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
+ if (event->event &
+ cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
+ event->event &= ~cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_EVENTS_MISSED);
scsi_scan_host(virtio_scsi_host(vscsi->vdev));
}
- switch (event->event) {
+ switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
case VIRTIO_SCSI_T_NO_EVENT:
break;
case VIRTIO_SCSI_T_TRANSPORT_RESET:
return err;
}
- static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+ static void virtio_scsi_init_hdr(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req *cmd,
struct scsi_cmnd *sc)
{
cmd->lun[0] = 1;
cmd->lun[1] = sc->device->id;
cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
cmd->lun[3] = sc->device->lun & 0xff;
- cmd->tag = (unsigned long)sc;
+ cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
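+ /* The tag carries the scsi_cmnd pointer through the device;
+ * virtscsi_abort() builds its TMF tag the same way, so both sides
+ * must use device endianness for the comparison to work.
+ */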
cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
cmd->prio = 0;
cmd->crn = 0;
}
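+ /* The vdev is threaded through both header initializers so that the
+ * tag and the T10 PI byte counts can be stored in device endianness.
+ */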
- static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
+ struct virtio_scsi_cmd_req_pi *cmd_pi,
struct scsi_cmnd *sc)
{
struct request *rq = sc->request;
struct blk_integrity *bi;
- virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+ virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);
if (!rq || !scsi_prot_sg_count(sc))
return;
bi = blk_get_integrity(rq->rq_disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
- cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+ cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
+ blk_rq_sectors(rq) *
+ bi->tuple_size);
else if (sc->sc_data_direction == DMA_FROM_DEVICE)
- cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+ cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
+ blk_rq_sectors(rq) *
+ bi->tuple_size);
}
static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
- virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+ virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd_pi);
} else {
- virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+ virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
req_size = sizeof(cmd->req.cmd);
}
return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}
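+ /* With blk-mq, the hardware-queue index is encoded in the request tag;
+ * mapping it 1:1 onto the request virtqueues picks a queue without any
+ * per-target steering or locking.
+ */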
+static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
+ struct scsi_cmnd *sc)
+{
+ u32 tag = blk_mq_unique_tag(sc->request);
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+ return &vscsi->req_vqs[hwq];
+}
+
static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
struct virtio_scsi_target_state *tgt)
{
struct virtio_scsi *vscsi = shost_priv(sh);
struct virtio_scsi_target_state *tgt =
scsi_target(sc->device)->hostdata;
- struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);
+ struct virtio_scsi_vq *req_vq;
+
+ if (shost_use_blk_mq(sh))
+ req_vq = virtscsi_pick_vq_mq(vscsi, sc);
+ else
+ req_vq = virtscsi_pick_vq(vscsi, tgt);
return virtscsi_queuecommand(vscsi, req_vq, sc);
}
cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
- .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
+ .subtype = cpu_to_virtio32(vscsi->vdev,
+ VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
.lun[0] = 1,
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
* virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
* @sdev: Virtscsi target whose queue depth to change
* @qdepth: New queue depth
- * @reason: Reason for the queue depth change.
*/
-static int virtscsi_change_queue_depth(struct scsi_device *sdev,
- int qdepth,
- int reason)
+static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
struct Scsi_Host *shost = sdev->host;
int max_depth = shost->cmd_per_lun;
- switch (reason) {
- case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */
- scsi_track_queue_full(sdev, qdepth);
- break;
- case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */
- case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */
- scsi_adjust_queue_depth(sdev,
- scsi_get_tag_type(sdev),
- min(max_depth, qdepth));
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return sdev->queue_depth;
+ return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}
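+ /* The reason-based switch (SCSI_QDEPTH_QFULL/RAMP_UP/DEFAULT) has
+ * moved into the SCSI core: with .track_queue_depth set in the host
+ * template, the core tracks queue-full events itself and the driver
+ * only clamps and applies the new depth.
+ */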
static int virtscsi_abort(struct scsi_cmnd *sc)
.lun[1] = sc->device->id,
.lun[2] = (sc->device->lun >> 8) | 0x40,
.lun[3] = sc->device->lun & 0xff,
- .tag = (unsigned long)sc,
+ .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
};
return virtscsi_tmf(vscsi, cmd);
}
.use_clustering = ENABLE_CLUSTERING,
.target_alloc = virtscsi_target_alloc,
.target_destroy = virtscsi_target_destroy,
+ .track_queue_depth = 1,
};
static struct scsi_host_template virtscsi_host_template_multi = {
.use_clustering = ENABLE_CLUSTERING,
.target_alloc = virtscsi_target_alloc,
.target_destroy = virtscsi_target_destroy,
+ .track_queue_depth = 1,
};
#define virtscsi_config_get(vdev, fld) \
shost->max_id = num_targets;
shost->max_channel = 0;
shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
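+ /* One blk-mq hardware queue per request virtqueue, matching the
+ * tag-to-vq mapping in virtscsi_pick_vq_mq().
+ */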
+ shost->nr_hw_queues = num_queues;
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |