/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2012 RisingTide Systems LLC.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

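/*
 * Note: vhost.c is #included directly below. In this tree the vhost core is
 * compiled into each vhost driver rather than built as a separate module,
 * so scsi.c pulls the core implementation in textually.
 */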
#include "vhost.c"
#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32

struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
        struct completion comp;
        /* Refcount for the inflight reqs */
        struct kref kref;
};

struct tcm_vhost_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
        u32 tvc_exp_data_len;
        /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        /* Pointer to response */
        struct virtio_scsi_cmd_resp __user *tvc_resp;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
        struct tcm_vhost_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
        /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
        unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
        struct llist_node tvc_completion_list;
        /* Used to track inflight cmd */
        struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
        char iport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_nodeacl() */
        struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
        /* list for tcm_vhost_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
        struct tcm_vhost_nexus *tpg_nexus;
        /* Pointer back to tcm_vhost_tport */
        struct tcm_vhost_tport *tport;
        /* Returned by tcm_vhost_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
        char tport_name[TCM_VHOST_NAMELEN];
        /* Returned by tcm_vhost_make_tport() */
        struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
        struct llist_node list;
};

enum {
        VHOST_SCSI_VQ_CTL = 0,
        VHOST_SCSI_VQ_EVT = 1,
        VHOST_SCSI_VQ_IO = 2,
};

enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

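/*
 * Up to 256 targets can hang off one vhost-scsi device, matching the 8-bit
 * target field of the virtio-scsi LUN format. Of the VHOST_SCSI_MAX_VQ
 * virtqueues, queue 0 is control and queue 1 is events (see the enum above);
 * the remaining queues carry I/O requests.
 */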
#define VHOST_SCSI_MAX_TARGET   256
#define VHOST_SCSI_MAX_VQ       128
#define VHOST_SCSI_MAX_EVENT    128

struct vhost_scsi_virtqueue {
        struct vhost_virtqueue vq;
        /*
         * Reference counting for inflight reqs, used for flush operation. At
         * any time, one of the two inflights tracks newly submitted commands,
         * while a flush waits for the other one's refcount to reach zero.
         */
        struct vhost_scsi_inflight inflights[2];
        /*
         * Indicate current inflight in use, protected by vq->mutex.
         * Writers must also take dev mutex and flush under it.
         */
        int inflight_idx;
};

struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
        struct tcm_vhost_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

        struct vhost_dev dev;
        struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

        struct vhost_work vs_completion_work; /* cmd completion work item */
        struct llist_head vs_completion_list; /* cmd completion queue */

        struct vhost_work vs_event_work; /* evt injection work item */
        struct llist_head vs_event_list; /* evt injection queue */

        bool vs_events_missed; /* any missed events, protected by vq->mutex */
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);

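/*
 * Number of pages an iovec touches, counting partial pages at either end.
 * For example (hypothetical values, 4 KiB pages): an 8-byte iovec starting
 * at page offset 0xffc straddles a page boundary, so this returns 2.
 */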
static int iov_num_pages(struct iovec *iov)
{
        return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
               ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}

void tcm_vhost_done_inflight(struct kref *kref)
{
        struct vhost_scsi_inflight *inflight;

        inflight = container_of(kref, struct vhost_scsi_inflight, kref);
        complete(&inflight->comp);
}

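/*
 * Flip each virtqueue over to a fresh inflight counter. When called from
 * vhost_scsi_flush(), the previous counters are returned via @old_inflight
 * so the flush can wait for their refcounts to drain to zero.
 */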
static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
{
        struct vhost_scsi_inflight *new_inflight;
        struct vhost_virtqueue *vq;
        int idx, i;

        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                vq = &vs->vqs[i].vq;

                mutex_lock(&vq->mutex);

                /* store old inflight */
                idx = vs->vqs[i].inflight_idx;
                if (old_inflight)
                        old_inflight[i] = &vs->vqs[i].inflights[idx];

                /* set up new inflight */
                vs->vqs[i].inflight_idx = idx ^ 1;
                new_inflight = &vs->vqs[i].inflights[idx ^ 1];
                kref_init(&new_inflight->kref);
                init_completion(&new_inflight->comp);

                mutex_unlock(&vq->mutex);
        }
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;

        svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
        inflight = &svq->inflights[svq->inflight_idx];
        kref_get(&inflight->kref);

        return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
        kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

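/*
 * Fixed-value fabric callbacks follow. The TCM core expects these ops to be
 * provided, but vhost-scsi has nothing dynamic to report for them.
 */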
static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
        return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
        return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code,
        unsigned char *buf)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                        format_code, buf);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                        format_code);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
        const char *buf,
        u32 *out_tid_len,
        char **port_nexus_ptr)
{
        struct tcm_vhost_tpg *tpg = container_of(se_tpg,
                                struct tcm_vhost_tpg, se_tpg);
        struct tcm_vhost_tport *tport = tpg->tport;

        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                        port_nexus_ptr);
        default:
                pr_err("Unknown tport_proto_id: 0x%02x, using"
                        " SAS emulation\n", tport->tport_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}

static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
        struct se_portal_group *se_tpg)
{
        struct tcm_vhost_nacl *nacl;

        nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
        if (!nacl) {
                pr_err("Unable to allocate struct tcm_vhost_nacl\n");
                return NULL;
        }

        return &nacl->se_node_acl;
}

static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl)
{
        struct tcm_vhost_nacl *nacl = container_of(se_nacl,
                        struct tcm_vhost_nacl, se_node_acl);
        kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
        return;
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
        return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
        return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
        return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
        return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
{
        struct vhost_scsi *vs = tv_cmd->tvc_vhost;

        llist_add(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);

        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(tv_cmd);
        return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(tv_cmd);
        return 0;
}

static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
        return 0;
}

static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
        vs->vs_events_nr--;
        kfree(evt);
}

static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        u32 event, u32 reason)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;

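        /*
         * Cap the number of queued events; once the cap is hit the event is
         * dropped and recorded via vs_events_missed so the guest can learn
         * that events were lost.
         */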
        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
                return NULL;
        }

        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
                vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }

        evt->event.event = event;
        evt->event.reason = reason;
        vs->vs_events_nr++;

        return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
{
        struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;

        /* TODO locking against target/backend threads? */
        transport_generic_free_cmd(se_cmd, 1);

        if (tv_cmd->tvc_sgl_count) {
                u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));

                kfree(tv_cmd->tvc_sgl);
        }

        tcm_vhost_put_inflight(tv_cmd->inflight);

        kfree(tv_cmd);
}

static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
        struct tcm_vhost_evt *evt)
{
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
        struct virtio_scsi_event __user *eventp;
        unsigned out, in;
        int head, ret;

        if (!vq->private_data) {
                vs->vs_events_missed = true;
                return;
        }

again:
        vhost_disable_notify(&vs->dev, vq);
        head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
                vs->vs_events_missed = true;
                return;
        }
        if (head == vq->num) {
                if (vhost_enable_notify(&vs->dev, vq))
                        goto again;
                vs->vs_events_missed = true;
                return;
        }

        if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
                vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
                                vq->iov[out].iov_len);
                vs->vs_events_missed = true;
                return;
        }

        if (vs->vs_events_missed) {
                event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
                vs->vs_events_missed = false;
        }

        eventp = vq->iov[out].iov_base;
        ret = __copy_to_user(eventp, event, sizeof(*event));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct tcm_vhost_evt *evt;
        struct llist_node *llnode;

        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
                evt = llist_entry(llnode, struct tcm_vhost_evt, list);
                llnode = llist_next(llnode);
                tcm_vhost_do_evt_work(vs, evt);
                tcm_vhost_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
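/*
 * Completions are drained in one batch off the lockless vs_completion_list;
 * the signal bitmap defers vhost_signal() so each virtqueue is kicked at
 * most once per batch rather than once per completed command.
 */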
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
        struct tcm_vhost_cmd *tv_cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
        int ret, vq;

        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
                tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &tv_cmd->tvc_se_cmd;

                pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
                        tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);

                memset(&v_rsp, 0, sizeof(v_rsp));
                v_rsp.resid = se_cmd->residual_count;
                /* TODO is status_qualifier field needed? */
                v_rsp.status = se_cmd->scsi_status;
                v_rsp.sense_len = se_cmd->scsi_sense_length;
                memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
                       v_rsp.sense_len);
                ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
                if (likely(ret == 0)) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
                        q = container_of(tv_cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
                        vq = q - vs->vqs;
                        __set_bit(vq, signal);
                } else
                        pr_err("Faulted on virtio_scsi_cmd_resp\n");

                vhost_scsi_free_cmd(tv_cmd);
        }

        vq = -1;
        while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
                < VHOST_SCSI_MAX_VQ)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
        struct vhost_virtqueue *vq,
        struct tcm_vhost_tpg *tv_tpg,
        struct virtio_scsi_cmd_req *v_req,
        u32 exp_data_len,
        int data_direction)
{
        struct tcm_vhost_cmd *tv_cmd;
        struct tcm_vhost_nexus *tv_nexus;

        tv_nexus = tv_tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct tcm_vhost_nexus\n");
                return ERR_PTR(-EIO);
        }

        tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
        if (!tv_cmd) {
                pr_err("Unable to allocate struct tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
        tv_cmd->tvc_tag = v_req->tag;
        tv_cmd->tvc_task_attr = v_req->task_attr;
        tv_cmd->tvc_exp_data_len = exp_data_len;
        tv_cmd->tvc_data_direction = data_direction;
        tv_cmd->tvc_nexus = tv_nexus;
        tv_cmd->inflight = tcm_vhost_get_inflight(vq);

        return tv_cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
        unsigned int sgl_count, struct iovec *iov, int write)
{
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
        struct page **pages;
        int ret, i;

        pages_nr = iov_num_pages(iov);
        if (pages_nr > sgl_count)
                return -ENOBUFS;

        pages = kmalloc(pages_nr * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than requested */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        kfree(pages);
        return ret;
}

static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
        struct iovec *iov, unsigned int niov, int write)
{
        int ret;
        unsigned int i;
        u32 sgl_count;
        struct scatterlist *sg;

        /*
         * Find out how long the sglist needs to be
         */
        sgl_count = 0;
        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);

        /* TODO overflow checking */

        sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;
        pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
               sg, sgl_count, !sg);
        sg_init_table(sg, sgl_count);

        tv_cmd->tvc_sgl = sg;
        tv_cmd->tvc_sgl_count = sgl_count;

        pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(sg, sgl_count, &iov[i], write);
                if (ret < 0) {
                        for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                                put_page(sg_page(&tv_cmd->tvc_sgl[i]));
                        kfree(tv_cmd->tvc_sgl);
                        tv_cmd->tvc_sgl = NULL;
                        tv_cmd->tvc_sgl_count = 0;
                        return ret;
                }

                sg += ret;
                sgl_count -= ret;
        }
        return 0;
}

static void tcm_vhost_submission_work(struct work_struct *work)
{
        struct tcm_vhost_cmd *tv_cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
        int rc, sg_no_bidi = 0;

        if (tv_cmd->tvc_sgl_count) {
                sg_ptr = tv_cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
                        sg_bidi_ptr = NULL;
                        sg_no_bidi = 0;
                }
#endif
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = tv_cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
                        tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
                        tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
                        0, sg_ptr, tv_cmd->tvc_sgl_count,
                        sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}

static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq, int head, unsigned out)
{
        struct virtio_scsi_cmd_resp __user *resp;
        struct virtio_scsi_cmd_resp rsp;
        int ret;

        memset(&rsp, 0, sizeof(rsp));
        rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
        resp = vq->iov[out].iov_base;
        ret = __copy_to_user(resp, &rsp, sizeof(rsp));
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
        struct vhost_virtqueue *vq)
{
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_cmd *tv_cmd;
        u32 exp_data_len, data_first, data_num, data_direction;
        unsigned out, in, i;
        int head, ret;
        u8 target;

        /*
         * We can handle the vq only after the endpoint is set up by calling
         * the VHOST_SCSI_SET_ENDPOINT ioctl.
         *
         * TODO: Check that we are running from vhost_worker which acts
         * as read-side critical section for vhost kind of RCU.
         * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
         */
        vs_tpg = rcu_dereference_check(vq->private_data, 1);
        if (!vs_tpg)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }

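                /*
                 * Classify the request by descriptor layout: the guest posts
                 * the virtio_scsi_cmd_req header as the first "out" buffer
                 * and receives virtio_scsi_cmd_resp in the first "in" buffer,
                 * so any extra "out" buffers are write payload and any extra
                 * "in" buffers are read payload.
                 */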
/* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        data_num = 0;
                } else if (out == 1 && in > 1) {
                        data_direction = DMA_FROM_DEVICE;
                        data_first = out + 1;
                        data_num = in - 1;
                } else if (out > 1 && in == 1) {
                        data_direction = DMA_TO_DEVICE;
                        data_first = 1;
                        data_num = out - 1;
                } else {
                        vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
                                        out, in);
                        break;
                }

                /*
                 * Check for a sane resp buffer so we can report errors to
                 * the guest.
                 */
                if (unlikely(vq->iov[out].iov_len !=
                                        sizeof(struct virtio_scsi_cmd_resp))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
                                " bytes\n", vq->iov[out].iov_len);
                        break;
                }

                if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
                        vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
                                " bytes\n", vq->iov[0].iov_len);
                        break;
                }
                pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
                        " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
                ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
                                sizeof(v_req));
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }

                /* Extract the tpgt */
                target = v_req.lun[1];
                tv_tpg = ACCESS_ONCE(vs_tpg[target]);

                /* Target does not exist, fail the request */
                if (unlikely(!tv_tpg)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                exp_data_len = 0;
                for (i = 0; i < data_num; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;

                tv_cmd = vhost_scsi_allocate_cmd(vq, tv_tpg, &v_req,
                                        exp_data_len, data_direction);
                if (IS_ERR(tv_cmd)) {
                        vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
                                        PTR_ERR(tv_cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", tv_cmd, exp_data_len, data_direction);

                tv_cmd->tvc_vhost = vs;
                tv_cmd->tvc_vq = vq;
                tv_cmd->tvc_resp = vq->iov[out].iov_base;

                /*
                 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
                 * that will be used by tcm_vhost_new_cmd_map() and down into
                 * target_setup_cmd_from_cdb()
                 */
                memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
                /*
                 * Check that the received CDB size does not exceed our
                 * hardcoded max for tcm_vhost
                 */
                /* TODO what if cdb was too small for varlen cdb header? */
                if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
                                        TCM_VHOST_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(tv_cmd->tvc_cdb),
                                TCM_VHOST_MAX_CDB_SIZE);
                        goto err_free;
                }
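                /*
                 * v_req.lun[] follows the virtio-scsi 8-byte LUN format:
                 * byte 0 is 1 ("flat" addressing), byte 1 selects the target,
                 * and bytes 2-3 carry the 14-bit LUN (byte 2 may have the
                 * 0x40 flat-space flag set, which the mask strips off here).
                 */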
                tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
                                        &vq->iov[data_first], data_num,
                                        data_direction == DMA_TO_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }

                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
                 */
                tv_cmd->tvc_vq_desc = head;
                /*
                 * Dispatch tv_cmd descriptor for cmwq execution in process
                 * context provided by tcm_vhost_workqueue.  This also ensures
                 * tv_cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
                queue_work(tcm_vhost_workqueue, &tv_cmd->work);
        }

        mutex_unlock(&vq->mutex);
        return;

err_free:
        vhost_scsi_free_cmd(tv_cmd);
err_cmd:
        vhost_scsi_send_bad_target(vs, vq, head, out);
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
        struct se_lun *lun, u32 event, u32 reason)
{
        struct tcm_vhost_evt *evt;

        evt = tcm_vhost_allocate_evt(vs, event, reason);
        if (!evt)
                return;

        if (tpg && lun) {
                /* TODO: share lun setup code with virtio-scsi.ko */
                /*
                 * Note: evt->event is zeroed when we allocate it and
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
                evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
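                /* LUNs >= 256 need the 0x40 flat-addressing flag in byte 2 */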
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
        }

        llist_add(&evt->list, &vs->vs_event_list);
        vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        mutex_lock(&vq->mutex);
        if (!vq->private_data)
                goto out;

        if (vs->vs_events_missed)
                tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
        mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                poll.work);
        struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

        vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
        vhost_poll_flush(&vs->vqs[index].vq.poll);
}

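/*
 * Flush sequence: swap in fresh inflight counters, drop the initial
 * reference on the old ones, flush all virtqueue polls and work items,
 * then sleep until every request started before the flush has completed.
 */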
1113 /* Callers must hold dev mutex */
1114 static void vhost_scsi_flush(struct vhost_scsi *vs)
1115 {
1116         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1117         int i;
1118
1119         /* Init new inflight and remember the old inflight */
1120         tcm_vhost_init_inflight(vs, old_inflight);
1121
1122         /*
1123          * The inflight->kref was initialized to 1. We decrement it here to
1124          * indicate the start of the flush operation so that it will reach 0
1125          * when all the reqs are finished.
1126          */
1127         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1128                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1129
1130         /* Flush both the vhost poll and vhost work */
1131         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1132                 vhost_scsi_flush_vq(vs, i);
1133         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1134         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1135
1136         /* Wait for all reqs issued before the flush to be finished */
1137         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1138                 wait_for_completion(&old_inflight[i]->comp);
1139 }
1140
1141 /*
1142  * Called from vhost_scsi_ioctl() context to walk the list of available
1143  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1144  *
1145  *  The lock nesting rule is:
1146  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1147  */
1148 static int vhost_scsi_set_endpoint(
1149         struct vhost_scsi *vs,
1150         struct vhost_scsi_target *t)
1151 {
1152         struct tcm_vhost_tport *tv_tport;
1153         struct tcm_vhost_tpg *tv_tpg;
1154         struct tcm_vhost_tpg **vs_tpg;
1155         struct vhost_virtqueue *vq;
1156         int index, ret, i, len;
1157         bool match = false;
1158
1159         mutex_lock(&tcm_vhost_mutex);
1160         mutex_lock(&vs->dev.mutex);
1161
1162         /* Verify that ring has been setup correctly. */
1163         for (index = 0; index < vs->dev.nvqs; ++index) {
1164                 /* Verify that ring has been setup correctly. */
1165                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1166                         ret = -EFAULT;
1167                         goto out;
1168                 }
1169         }
1170
1171         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1172         vs_tpg = kzalloc(len, GFP_KERNEL);
1173         if (!vs_tpg) {
1174                 ret = -ENOMEM;
1175                 goto out;
1176         }
1177         if (vs->vs_tpg)
1178                 memcpy(vs_tpg, vs->vs_tpg, len);
1179
1180         list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
1181                 mutex_lock(&tv_tpg->tv_tpg_mutex);
1182                 if (!tv_tpg->tpg_nexus) {
1183                         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1184                         continue;
1185                 }
1186                 if (tv_tpg->tv_tpg_vhost_count != 0) {
1187                         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1188                         continue;
1189                 }
1190                 tv_tport = tv_tpg->tport;
1191
1192                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1193                         if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
1194                                 kfree(vs_tpg);
1195                                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1196                                 ret = -EEXIST;
1197                                 goto out;
1198                         }
1199                         tv_tpg->tv_tpg_vhost_count++;
1200                         tv_tpg->vhost_scsi = vs;
1201                         vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
1202                         smp_mb__after_atomic_inc();
1203                         match = true;
1204                 }
1205                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1206         }
1207
1208         if (match) {
1209                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1210                        sizeof(vs->vs_vhost_wwpn));
1211                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1212                         vq = &vs->vqs[i].vq;
1213                         /* Flushing the vhost_work acts as synchronize_rcu */
1214                         mutex_lock(&vq->mutex);
1215                         rcu_assign_pointer(vq->private_data, vs_tpg);
1216                         vhost_init_used(vq);
1217                         mutex_unlock(&vq->mutex);
1218                 }
1219                 ret = 0;
1220         } else {
1221                 ret = -EEXIST;
1222         }
1223
1224         /*
1225          * Act as synchronize_rcu to make sure access to
1226          * old vs->vs_tpg is finished.
1227          */
1228         vhost_scsi_flush(vs);
1229         kfree(vs->vs_tpg);
1230         vs->vs_tpg = vs_tpg;
1231
1232 out:
1233         mutex_unlock(&vs->dev.mutex);
1234         mutex_unlock(&tcm_vhost_mutex);
1235         return ret;
1236 }
1237
1238 static int vhost_scsi_clear_endpoint(
1239         struct vhost_scsi *vs,
1240         struct vhost_scsi_target *t)
1241 {
1242         struct tcm_vhost_tport *tv_tport;
1243         struct tcm_vhost_tpg *tv_tpg;
1244         struct vhost_virtqueue *vq;
1245         bool match = false;
1246         int index, ret, i;
1247         u8 target;
1248
1249         mutex_lock(&tcm_vhost_mutex);
1250         mutex_lock(&vs->dev.mutex);
1251         /* Verify that ring has been setup correctly. */
1252         for (index = 0; index < vs->dev.nvqs; ++index) {
1253                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1254                         ret = -EFAULT;
1255                         goto err_dev;
1256                 }
1257         }
1258
1259         if (!vs->vs_tpg) {
1260                 ret = 0;
1261                 goto err_dev;
1262         }
1263
1264         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1265                 target = i;
1266                 tv_tpg = vs->vs_tpg[target];
1267                 if (!tv_tpg)
1268                         continue;
1269
1270                 mutex_lock(&tv_tpg->tv_tpg_mutex);
1271                 tv_tport = tv_tpg->tport;
1272                 if (!tv_tport) {
1273                         ret = -ENODEV;
1274                         goto err_tpg;
1275                 }
1276
1277                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1278                         pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
1279                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1280                                 tv_tport->tport_name, tv_tpg->tport_tpgt,
1281                                 t->vhost_wwpn, t->vhost_tpgt);
1282                         ret = -EINVAL;
1283                         goto err_tpg;
1284                 }
1285                 tv_tpg->tv_tpg_vhost_count--;
1286                 tv_tpg->vhost_scsi = NULL;
1287                 vs->vs_tpg[target] = NULL;
1288                 match = true;
1289                 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1290         }
1291         if (match) {
1292                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1293                         vq = &vs->vqs[i].vq;
1294                         /* Flushing the vhost_work acts as synchronize_rcu */
1295                         mutex_lock(&vq->mutex);
1296                         rcu_assign_pointer(vq->private_data, NULL);
1297                         mutex_unlock(&vq->mutex);
1298                 }
1299         }
1300         /*
1301          * Act as synchronize_rcu to make sure access to
1302          * old vs->vs_tpg is finished.
1303          */
1304         vhost_scsi_flush(vs);
1305         kfree(vs->vs_tpg);
1306         vs->vs_tpg = NULL;
1307         WARN_ON(vs->vs_events_nr);
1308         mutex_unlock(&vs->dev.mutex);
1309         mutex_unlock(&tcm_vhost_mutex);
1310         return 0;
1311
1312 err_tpg:
1313         mutex_unlock(&tv_tpg->tv_tpg_mutex);
1314 err_dev:
1315         mutex_unlock(&vs->dev.mutex);
1316         mutex_unlock(&tcm_vhost_mutex);
1317         return ret;
1318 }
1319
1320 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1321 {
1322         if (features & ~VHOST_SCSI_FEATURES)
1323                 return -EOPNOTSUPP;
1324
1325         mutex_lock(&vs->dev.mutex);
1326         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1327             !vhost_log_access_ok(&vs->dev)) {
1328                 mutex_unlock(&vs->dev.mutex);
1329                 return -EFAULT;
1330         }
1331         vs->dev.acked_features = features;
1332         smp_wmb();
1333         vhost_scsi_flush(vs);
1334         mutex_unlock(&vs->dev.mutex);
1335         return 0;
1336 }
1337
1338 static int vhost_scsi_open(struct inode *inode, struct file *f)
1339 {
1340         struct vhost_scsi *s;
1341         struct vhost_virtqueue **vqs;
1342         int r, i;
1343
1344         s = kzalloc(sizeof(*s), GFP_KERNEL);
1345         if (!s)
1346                 return -ENOMEM;
1347
1348         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1349         if (!vqs) {
1350                 kfree(s);
1351                 return -ENOMEM;
1352         }
1353
1354         vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
1355         vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
1356
1357         s->vs_events_nr = 0;
1358         s->vs_events_missed = false;
1359
1360         vqs[VHOST_SCSI_VQ_CTL] = &s->vqs[VHOST_SCSI_VQ_CTL].vq;
1361         vqs[VHOST_SCSI_VQ_EVT] = &s->vqs[VHOST_SCSI_VQ_EVT].vq;
1362         s->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1363         s->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1364         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1365                 vqs[i] = &s->vqs[i].vq;
1366                 s->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1367         }
1368         r = vhost_dev_init(&s->dev, vqs, VHOST_SCSI_MAX_VQ);
1369
1370         tcm_vhost_init_inflight(s, NULL);
1371
1372         if (r < 0) {
1373                 kfree(vqs);
1374                 kfree(s);
1375                 return r;
1376         }
1377
1378         f->private_data = s;
1379         return 0;
1380 }
1381
1382 static int vhost_scsi_release(struct inode *inode, struct file *f)
1383 {
1384         struct vhost_scsi *s = f->private_data;
1385         struct vhost_scsi_target t;
1386
1387         mutex_lock(&s->dev.mutex);
1388         memcpy(t.vhost_wwpn, s->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1389         mutex_unlock(&s->dev.mutex);
1390         vhost_scsi_clear_endpoint(s, &t);
1391         vhost_dev_stop(&s->dev);
1392         vhost_dev_cleanup(&s->dev, false);
1393         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1394         vhost_scsi_flush(s);
1395         kfree(s->dev.vqs);
1396         kfree(s);
1397         return 0;
1398 }
1399
static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

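/*
 * Character device plumbing: registering vhost_scsi_misc below creates
 * the /dev/vhost-scsi node (with a dynamically assigned minor) through
 * which userspace reaches vhost_scsi_open() and vhost_scsi_ioctl().
 */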
static const struct file_operations vhost_scsi_fops = {
	.owner		= THIS_MODULE,
	.release	= vhost_scsi_release,
	.unlocked_ioctl	= vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vhost_scsi_compat_ioctl,
#endif
	.open		= vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "vhost-scsi",
	.fops	= &vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

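/*
 * Emit a virtio-scsi transport reset event when a LUN is linked into or
 * unlinked from a TPG.  This is a no-op unless the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG; the event itself is queued under the event
 * virtqueue mutex by tcm_vhost_send_evt().
 */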
static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
	struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}

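/*
 * configfs port link/unlink callbacks, invoked when a LUN symlink is
 * created or removed under a vhost TPG.  The port count is updated under
 * tv_tpg_mutex and the guest is notified via the hotplug helpers above.
 */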
static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count++;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_tpg->tv_tpg_port_count--;
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tv_tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}

static struct se_node_acl *tcm_vhost_make_nodeacl(
	struct se_portal_group *se_tpg,
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

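/*
 * Create the I_T nexus for a TPG: allocate a tcm_vhost_nexus, set up its
 * se_session, locate (or, in demo mode, generate) the initiator node ACL
 * for 'name', and register the session with the target core.  Exactly
 * one nexus may exist per TPG at a time.
 */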
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	if (tv_tpg->tpg_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("tv_tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tv_tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tv_nexus->tvn_se_sess = transport_init_session();
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		transport_free_session(tv_nexus->tvn_se_sess);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tv_tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tv_tpg->tv_tpg_mutex);
	return 0;
}

static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tv_tpg->tv_tpg_mutex);
	tv_nexus = tv_tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tv_tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tv_tpg->tv_tpg_mutex);

	return ret;
}

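/*
 * Writing an initiator WWN to the per-TPG "nexus" attribute creates the
 * I_T nexus; writing the literal string "NULL" tears it down again.  A
 * typical configfs invocation from the shell (both WWNs here are
 * illustrative examples only):
 *
 *	echo naa.600140554cf3a18e > \
 *	    /sys/kernel/config/target/vhost/naa.60014053088ebe63/tpgt_1/nexus
 */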
static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tv_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
	&tcm_vhost_tpg_nexus.attr,
	NULL,
};

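/*
 * TPG creation callback for configfs.  Directory names must follow the
 * "tpgt_%u" convention, e.g. mkdir(2) on
 * /sys/kernel/config/target/vhost/naa.60014053088ebe63/tpgt_1 (the WWN
 * here is an illustrative example) arrives here with name == "tpgt_1".
 */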
static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);
	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return ERR_PTR(ret);
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

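/*
 * WWN creation callback for configfs.  The directory name chooses the
 * emulated fabric: a "naa." prefix selects SAS, "fc." selects FCP (the
 * prefix itself is stripped from the stored name), and "iqn." selects
 * iSCSI.  Anything else is rejected.
 */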
static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t tcm_vhost_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};

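/*
 * Fabric ops handed to the target core.  The tpg_check_demo_mode
 * callbacks are wired to return true so that the initiator named via the
 * nexus attribute is accepted without an explicit NodeACL, and the
 * I/O-path callbacks (write_pending, queue_data_in, queue_status, ...)
 * point at the tcm_vhost_* handlers defined earlier in this file.
 */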
static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name		= tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident		= tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn			= tcm_vhost_get_fabric_wwn,
	.tpg_get_tag			= tcm_vhost_get_tag,
	.tpg_get_default_depth		= tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id	= tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len	= tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id	= tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode		= tcm_vhost_check_true,
	.tpg_check_demo_mode_cache	= tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl		= tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl		= tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index		= tcm_vhost_tpg_get_inst_index,
	.release_cmd			= tcm_vhost_release_cmd,
	.shutdown_session		= tcm_vhost_shutdown_session,
	.close_session			= tcm_vhost_close_session,
	.sess_get_index			= tcm_vhost_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_vhost_write_pending,
	.write_pending_status		= tcm_vhost_write_pending_status,
	.set_default_node_attributes	= tcm_vhost_set_default_node_attrs,
	.get_task_tag			= tcm_vhost_get_task_tag,
	.get_cmd_state			= tcm_vhost_get_cmd_state,
	.queue_data_in			= tcm_vhost_queue_data_in,
	.queue_status			= tcm_vhost_queue_status,
	.queue_tm_rsp			= tcm_vhost_queue_tm_rsp,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_vhost_make_tport,
	.fabric_drop_wwn		= tcm_vhost_drop_tport,
	.fabric_make_tpg		= tcm_vhost_make_tpg,
	.fabric_drop_tpg		= tcm_vhost_drop_tpg,
	.fabric_post_link		= tcm_vhost_port_link,
	.fabric_pre_unlink		= tcm_vhost_port_unlink,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_make_nodeacl		= tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl		= tcm_vhost_drop_nodeacl,
};

static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		/* Don't leak the template allocated above on failure */
		target_fabric_configfs_free(fabric);
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}

static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}

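/*
 * Module init/exit.  Setup order matters: the workqueue must exist before
 * the misc device and configfs fabric can hand work to it, and the error
 * paths unwind in exactly the reverse order, as does tcm_vhost_exit().
 */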
static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}

static void __exit tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);