karo-tx-linux.git: drivers/nvme/host/fc.c (blob c3ab1043efbddf14a86ab7c8c625667f8c312b61)
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/parser.h>
20 #include <uapi/scsi/fc/fc_fs.h>
21 #include <uapi/scsi/fc/fc_els.h>
22 #include <linux/delay.h>
23
24 #include "nvme.h"
25 #include "fabrics.h"
26 #include <linux/nvme-fc-driver.h>
27 #include <linux/nvme-fc.h>
28
29
30 /* *************************** Data Structures/Defines ****************** */
31
32
33 /*
34  * We handle AEN commands ourselves and don't even let the
35  * block layer know about them.
36  */
37 #define NVME_FC_NR_AEN_COMMANDS 1
38 #define NVME_FC_AQ_BLKMQ_DEPTH  \
39         (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
40 #define AEN_CMDID_BASE          (NVME_FC_AQ_BLKMQ_DEPTH + 1)
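/*
 * Editorial note, not part of the original source: assuming the
 * contemporary NVMF_AQ_DEPTH of 32, NVME_FC_AQ_BLKMQ_DEPTH works out to
 * 31 tags for the admin blk-mq queue, and the single AEN command gets
 * command id 32 (AEN_CMDID_BASE), just above the blk-mq managed range.
 */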
41
42 enum nvme_fc_queue_flags {
43         NVME_FC_Q_CONNECTED = (1 << 0),
44 };
45
46 #define NVMEFC_QUEUE_DELAY      3               /* ms units */
47
48 #define NVME_FC_MAX_CONNECT_ATTEMPTS    1
49
50 struct nvme_fc_queue {
51         struct nvme_fc_ctrl     *ctrl;
52         struct device           *dev;
53         struct blk_mq_hw_ctx    *hctx;
54         void                    *lldd_handle;
55         int                     queue_size;
56         size_t                  cmnd_capsule_len;
57         u32                     qnum;
58         u32                     rqcnt;
59         u32                     seqno;
60
61         u64                     connection_id;
62         atomic_t                csn;
63
64         unsigned long           flags;
65 } __aligned(sizeof(u64));       /* alignment for other things alloc'd with */
66
67 enum nvme_fcop_flags {
68         FCOP_FLAGS_TERMIO       = (1 << 0),
69         FCOP_FLAGS_RELEASED     = (1 << 1),
70         FCOP_FLAGS_COMPLETE     = (1 << 2),
71         FCOP_FLAGS_AEN          = (1 << 3),
72 };
73
74 struct nvmefc_ls_req_op {
75         struct nvmefc_ls_req    ls_req;
76
77         struct nvme_fc_rport    *rport;
78         struct nvme_fc_queue    *queue;
79         struct request          *rq;
80         u32                     flags;
81
82         int                     ls_error;
83         struct completion       ls_done;
84         struct list_head        lsreq_list;     /* rport->ls_req_list */
85         bool                    req_queued;
86 };
87
88 enum nvme_fcpop_state {
89         FCPOP_STATE_UNINIT      = 0,
90         FCPOP_STATE_IDLE        = 1,
91         FCPOP_STATE_ACTIVE      = 2,
92         FCPOP_STATE_ABORTED     = 3,
93         FCPOP_STATE_COMPLETE    = 4,
94 };
95
96 struct nvme_fc_fcp_op {
97         struct nvme_request     nreq;           /*
98                                                  * nvme/host/core.c
99                                                  * requires this to be
100                                                  * the 1st element in the
101                                                  * private structure
102                                                  * associated with the
103                                                  * request.
104                                                  */
105         struct nvmefc_fcp_req   fcp_req;
106
107         struct nvme_fc_ctrl     *ctrl;
108         struct nvme_fc_queue    *queue;
109         struct request          *rq;
110
111         atomic_t                state;
112         u32                     flags;
113         u32                     rqno;
114         u32                     nents;
115
116         struct nvme_fc_cmd_iu   cmd_iu;
117         struct nvme_fc_ersp_iu  rsp_iu;
118 };
119
120 struct nvme_fc_lport {
121         struct nvme_fc_local_port       localport;
122
123         struct ida                      endp_cnt;
124         struct list_head                port_list;      /* nvme_fc_port_list */
125         struct list_head                endp_list;
126         struct device                   *dev;   /* physical device for dma */
127         struct nvme_fc_port_template    *ops;
128         struct kref                     ref;
129 } __aligned(sizeof(u64));       /* alignment for other things alloc'd with */
130
131 struct nvme_fc_rport {
132         struct nvme_fc_remote_port      remoteport;
133
134         struct list_head                endp_list; /* for lport->endp_list */
135         struct list_head                ctrl_list;
136         struct list_head                ls_req_list;
137         struct device                   *dev;   /* physical device for dma */
138         struct nvme_fc_lport            *lport;
139         spinlock_t                      lock;
140         struct kref                     ref;
141 } __aligned(sizeof(u64));       /* alignment for other things alloc'd with */
142
143 enum nvme_fcctrl_flags {
144         FCCTRL_TERMIO           = (1 << 0),
145 };
146
147 struct nvme_fc_ctrl {
148         spinlock_t              lock;
149         struct nvme_fc_queue    *queues;
150         struct device           *dev;
151         struct nvme_fc_lport    *lport;
152         struct nvme_fc_rport    *rport;
153         u32                     queue_count;
154         u32                     cnum;
155
156         u64                     association_id;
157
158         u64                     cap;
159
160         struct list_head        ctrl_list;      /* rport->ctrl_list */
161
162         struct blk_mq_tag_set   admin_tag_set;
163         struct blk_mq_tag_set   tag_set;
164
165         struct work_struct      delete_work;
166         struct work_struct      reset_work;
167         struct delayed_work     connect_work;
168         int                     connect_attempts;
169
170         struct kref             ref;
171         u32                     flags;
172         u32                     iocnt;
173
174         struct nvme_fc_fcp_op   aen_ops[NVME_FC_NR_AEN_COMMANDS];
175
176         struct nvme_ctrl        ctrl;
177 };
178
179 static inline struct nvme_fc_ctrl *
180 to_fc_ctrl(struct nvme_ctrl *ctrl)
181 {
182         return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
183 }
184
185 static inline struct nvme_fc_lport *
186 localport_to_lport(struct nvme_fc_local_port *portptr)
187 {
188         return container_of(portptr, struct nvme_fc_lport, localport);
189 }
190
191 static inline struct nvme_fc_rport *
192 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
193 {
194         return container_of(portptr, struct nvme_fc_rport, remoteport);
195 }
196
197 static inline struct nvmefc_ls_req_op *
198 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
199 {
200         return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
201 }
202
203 static inline struct nvme_fc_fcp_op *
204 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
205 {
206         return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
207 }
208
209
210
211 /* *************************** Globals **************************** */
212
213
214 static DEFINE_SPINLOCK(nvme_fc_lock);
215
216 static LIST_HEAD(nvme_fc_lport_list);
217 static DEFINE_IDA(nvme_fc_local_port_cnt);
218 static DEFINE_IDA(nvme_fc_ctrl_cnt);
219
220 static struct workqueue_struct *nvme_fc_wq;
221
222
223
224 /* *********************** FC-NVME Port Management ************************ */
225
226 static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
227 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
228                         struct nvme_fc_queue *, unsigned int);
229
230
231 /**
232  * nvme_fc_register_localport - transport entry point called by an
233  *                              LLDD to register the existence of an NVME
234  *                              host FC port.
235  * @pinfo:     pointer to information about the port to be registered
236  * @template:  LLDD entrypoints and operational parameters for the port
237  * @dev:       physical hardware device node port corresponds to. Will be
238  *             used for DMA mappings
239  * @portptr:   pointer to a local port pointer. Upon success, the routine
240  *             will allocate an nvme_fc_local_port structure and place its
241  *             address in the local port pointer. Upon failure, the local port
242  *             pointer will be set to NULL.
243  *
244  * Returns:
245  * a completion status. Must be 0 upon success; a negative errno
246  * (ex: -ENXIO) upon failure.
247  */
248 int
249 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
250                         struct nvme_fc_port_template *template,
251                         struct device *dev,
252                         struct nvme_fc_local_port **portptr)
253 {
254         struct nvme_fc_lport *newrec;
255         unsigned long flags;
256         int ret, idx;
257
258         if (!template->localport_delete || !template->remoteport_delete ||
259             !template->ls_req || !template->fcp_io ||
260             !template->ls_abort || !template->fcp_abort ||
261             !template->max_hw_queues || !template->max_sgl_segments ||
262             !template->max_dif_sgl_segments || !template->dma_boundary) {
263                 ret = -EINVAL;
264                 goto out_reghost_failed;
265         }
266
267         newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
268                          GFP_KERNEL);
269         if (!newrec) {
270                 ret = -ENOMEM;
271                 goto out_reghost_failed;
272         }
273
274         idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
275         if (idx < 0) {
276                 ret = -ENOSPC;
277                 goto out_fail_kfree;
278         }
279
280         if (!get_device(dev) && dev) {
281                 ret = -ENODEV;
282                 goto out_ida_put;
283         }
284
285         INIT_LIST_HEAD(&newrec->port_list);
286         INIT_LIST_HEAD(&newrec->endp_list);
287         kref_init(&newrec->ref);
288         newrec->ops = template;
289         newrec->dev = dev;
290         ida_init(&newrec->endp_cnt);
291         newrec->localport.private = &newrec[1];
292         newrec->localport.node_name = pinfo->node_name;
293         newrec->localport.port_name = pinfo->port_name;
294         newrec->localport.port_role = pinfo->port_role;
295         newrec->localport.port_id = pinfo->port_id;
296         newrec->localport.port_state = FC_OBJSTATE_ONLINE;
297         newrec->localport.port_num = idx;
298
299         spin_lock_irqsave(&nvme_fc_lock, flags);
300         list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
301         spin_unlock_irqrestore(&nvme_fc_lock, flags);
302
303         if (dev)
304                 dma_set_seg_boundary(dev, template->dma_boundary);
305
306         *portptr = &newrec->localport;
307         return 0;
308
309 out_ida_put:
310         ida_simple_remove(&nvme_fc_local_port_cnt, idx);
311 out_fail_kfree:
312         kfree(newrec);
313 out_reghost_failed:
314         *portptr = NULL;
315
316         return ret;
317 }
318 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
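/*
 * Illustrative sketch, not part of the original driver: how an LLDD
 * might fill in the port info and template and register a local port.
 * All example_* names are hypothetical, and FC_PORT_ROLE_NVME_INITIATOR
 * is assumed to be provided by nvme-fc-driver.h; only the fields checked
 * by nvme_fc_register_localport() above are shown.
 */
#if 0
static struct nvme_fc_local_port *example_lport;

static int example_lldd_register(struct device *hwdev, u64 wwnn, u64 wwpn)
{
	struct nvme_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
		.port_id	= 0x010200,	/* fabric-assigned N_Port ID */
	};
	static struct nvme_fc_port_template tmpl = {
		.localport_delete	= example_localport_delete,
		.remoteport_delete	= example_remoteport_delete,
		.ls_req			= example_ls_req,
		.fcp_io			= example_fcp_io,
		.ls_abort		= example_ls_abort,
		.fcp_abort		= example_fcp_abort,
		.max_hw_queues		= 4,
		.max_sgl_segments	= 64,
		.max_dif_sgl_segments	= 64,
		.dma_boundary		= 0xFFFFFFFF,
		.local_priv_sz		= sizeof(struct example_lport_priv),
		.remote_priv_sz		= sizeof(struct example_rport_priv),
		.lsrqst_priv_sz		= sizeof(struct example_ls_priv),
		.fcprqst_priv_sz	= sizeof(struct example_fcp_priv),
	};

	/* fails with -EINVAL if any mandatory callback/limit is missing */
	return nvme_fc_register_localport(&pinfo, &tmpl, hwdev, &example_lport);
}
#endif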
319
320 static void
321 nvme_fc_free_lport(struct kref *ref)
322 {
323         struct nvme_fc_lport *lport =
324                 container_of(ref, struct nvme_fc_lport, ref);
325         unsigned long flags;
326
327         WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
328         WARN_ON(!list_empty(&lport->endp_list));
329
330         /* remove from transport list */
331         spin_lock_irqsave(&nvme_fc_lock, flags);
332         list_del(&lport->port_list);
333         spin_unlock_irqrestore(&nvme_fc_lock, flags);
334
335         /* let the LLDD know we've finished tearing it down */
336         lport->ops->localport_delete(&lport->localport);
337
338         ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
339         ida_destroy(&lport->endp_cnt);
340
341         put_device(lport->dev);
342
343         kfree(lport);
344 }
345
346 static void
347 nvme_fc_lport_put(struct nvme_fc_lport *lport)
348 {
349         kref_put(&lport->ref, nvme_fc_free_lport);
350 }
351
352 static int
353 nvme_fc_lport_get(struct nvme_fc_lport *lport)
354 {
355         return kref_get_unless_zero(&lport->ref);
356 }
357
358 /**
359  * nvme_fc_unregister_localport - transport entry point called by an
360  *                              LLDD to deregister/remove a previously
361  *                              registered NVME host FC port.
362  * @portptr:   pointer to the (registered) local port that is to be
363  *             deregistered.
364  *
365  * Returns:
366  * a completion status. Must be 0 upon success; a negative errno
367  * (ex: -ENXIO) upon failure.
368  */
369 int
370 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
371 {
372         struct nvme_fc_lport *lport = localport_to_lport(portptr);
373         unsigned long flags;
374
375         if (!portptr)
376                 return -EINVAL;
377
378         spin_lock_irqsave(&nvme_fc_lock, flags);
379
380         if (portptr->port_state != FC_OBJSTATE_ONLINE) {
381                 spin_unlock_irqrestore(&nvme_fc_lock, flags);
382                 return -EINVAL;
383         }
384         portptr->port_state = FC_OBJSTATE_DELETED;
385
386         spin_unlock_irqrestore(&nvme_fc_lock, flags);
387
388         nvme_fc_lport_put(lport);
389
390         return 0;
391 }
392 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
393
394 /**
395  * nvme_fc_register_remoteport - transport entry point called by an
396  *                              LLDD to register the existence of an NVME
397  *                              subsystem FC port on its fabric.
398  * @localport: pointer to the (registered) local port that the remote
399  *             subsystem port is connected to.
400  * @pinfo:     pointer to information about the port to be registered
401  * @portptr:   pointer to a remote port pointer. Upon success, the routine
402  *             will allocate an nvme_fc_remote_port structure and place its
403  *             address in the remote port pointer. Upon failure, the remote port
404  *             pointer will be set to NULL.
405  *
406  * Returns:
407  * a completion status. Must be 0 upon success; a negative errno
408  * (ex: -ENXIO) upon failure.
409  */
410 int
411 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
412                                 struct nvme_fc_port_info *pinfo,
413                                 struct nvme_fc_remote_port **portptr)
414 {
415         struct nvme_fc_lport *lport = localport_to_lport(localport);
416         struct nvme_fc_rport *newrec;
417         unsigned long flags;
418         int ret, idx;
419
420         newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
421                          GFP_KERNEL);
422         if (!newrec) {
423                 ret = -ENOMEM;
424                 goto out_reghost_failed;
425         }
426
427         if (!nvme_fc_lport_get(lport)) {
428                 ret = -ESHUTDOWN;
429                 goto out_kfree_rport;
430         }
431
432         idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
433         if (idx < 0) {
434                 ret = -ENOSPC;
435                 goto out_lport_put;
436         }
437
438         INIT_LIST_HEAD(&newrec->endp_list);
439         INIT_LIST_HEAD(&newrec->ctrl_list);
440         INIT_LIST_HEAD(&newrec->ls_req_list);
441         kref_init(&newrec->ref);
442         spin_lock_init(&newrec->lock);
443         newrec->remoteport.localport = &lport->localport;
444         newrec->dev = lport->dev;
445         newrec->lport = lport;
446         newrec->remoteport.private = &newrec[1];
447         newrec->remoteport.port_role = pinfo->port_role;
448         newrec->remoteport.node_name = pinfo->node_name;
449         newrec->remoteport.port_name = pinfo->port_name;
450         newrec->remoteport.port_id = pinfo->port_id;
451         newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
452         newrec->remoteport.port_num = idx;
453
454         spin_lock_irqsave(&nvme_fc_lock, flags);
455         list_add_tail(&newrec->endp_list, &lport->endp_list);
456         spin_unlock_irqrestore(&nvme_fc_lock, flags);
457
458         *portptr = &newrec->remoteport;
459         return 0;
460
461 out_lport_put:
462         nvme_fc_lport_put(lport);
463 out_kfree_rport:
464         kfree(newrec);
465 out_reghost_failed:
466         *portptr = NULL;
467         return ret;
468 }
469 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
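/*
 * Illustrative sketch, not part of the original driver: when the LLDD
 * discovers an NVME-capable subsystem port on the fabric, it reports it
 * against the local port registered earlier.  The example_* names are
 * hypothetical and FC_PORT_ROLE_NVME_TARGET is assumed to come from
 * nvme-fc-driver.h.
 */
#if 0
static int example_lldd_new_target(struct nvme_fc_local_port *lport,
		u64 wwnn, u64 wwpn, u32 port_id,
		struct nvme_fc_remote_port **rport)
{
	struct nvme_fc_port_info rinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_role	= FC_PORT_ROLE_NVME_TARGET,
		.port_id	= port_id,
	};

	/* returns -ESHUTDOWN if the local port is already being deleted */
	return nvme_fc_register_remoteport(lport, &rinfo, rport);
}
#endif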
470
471 static void
472 nvme_fc_free_rport(struct kref *ref)
473 {
474         struct nvme_fc_rport *rport =
475                 container_of(ref, struct nvme_fc_rport, ref);
476         struct nvme_fc_lport *lport =
477                         localport_to_lport(rport->remoteport.localport);
478         unsigned long flags;
479
480         WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
481         WARN_ON(!list_empty(&rport->ctrl_list));
482
483         /* remove from lport list */
484         spin_lock_irqsave(&nvme_fc_lock, flags);
485         list_del(&rport->endp_list);
486         spin_unlock_irqrestore(&nvme_fc_lock, flags);
487
488         /* let the LLDD know we've finished tearing it down */
489         lport->ops->remoteport_delete(&rport->remoteport);
490
491         ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
492
493         kfree(rport);
494
495         nvme_fc_lport_put(lport);
496 }
497
498 static void
499 nvme_fc_rport_put(struct nvme_fc_rport *rport)
500 {
501         kref_put(&rport->ref, nvme_fc_free_rport);
502 }
503
504 static int
505 nvme_fc_rport_get(struct nvme_fc_rport *rport)
506 {
507         return kref_get_unless_zero(&rport->ref);
508 }
509
510 static int
511 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
512 {
513         struct nvmefc_ls_req_op *lsop;
514         unsigned long flags;
515
516 restart:
517         spin_lock_irqsave(&rport->lock, flags);
518
519         list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
520                 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
521                         lsop->flags |= FCOP_FLAGS_TERMIO;
522                         spin_unlock_irqrestore(&rport->lock, flags);
523                         rport->lport->ops->ls_abort(&rport->lport->localport,
524                                                 &rport->remoteport,
525                                                 &lsop->ls_req);
526                         goto restart;
527                 }
528         }
529         spin_unlock_irqrestore(&rport->lock, flags);
530
531         return 0;
532 }
533
534 /**
535  * nvme_fc_unregister_remoteport - transport entry point called by an
536  *                              LLDD to deregister/remove a previously
537  *                              registered NVME subsystem FC port.
538  * @portptr:    pointer to the (registered) remote port that is to be
539  *              deregistered.
540  *
541  * Returns:
542  * a completion status. Must be 0 upon success; a negative errno
543  * (ex: -ENXIO) upon failure.
544  */
545 int
546 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
547 {
548         struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
549         struct nvme_fc_ctrl *ctrl;
550         unsigned long flags;
551
552         if (!portptr)
553                 return -EINVAL;
554
555         spin_lock_irqsave(&rport->lock, flags);
556
557         if (portptr->port_state != FC_OBJSTATE_ONLINE) {
558                 spin_unlock_irqrestore(&rport->lock, flags);
559                 return -EINVAL;
560         }
561         portptr->port_state = FC_OBJSTATE_DELETED;
562
563         /* tear down all associations to the remote port */
564         list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
565                 __nvme_fc_del_ctrl(ctrl);
566
567         spin_unlock_irqrestore(&rport->lock, flags);
568
569         nvme_fc_abort_lsops(rport);
570
571         nvme_fc_rport_put(rport);
572         return 0;
573 }
574 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
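/*
 * Illustrative sketch, not part of the original driver: the teardown
 * ordering as seen from an LLDD.  Unregistering the remote port tears
 * down any controllers/associations on it; the local port is released
 * once every remote port has dropped its reference.  example_rport and
 * example_lport are hypothetical.
 */
#if 0
static void example_lldd_teardown(void)
{
	nvme_fc_unregister_remoteport(example_rport);
	nvme_fc_unregister_localport(example_lport);
}
#endif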
575
576
577 /* *********************** FC-NVME DMA Handling **************************** */
578
579 /*
580  * The fcloop device passes in a NULL device pointer. Real LLDDs will
581  * pass in a valid device pointer. If NULL is passed to the dma mapping
582  * routines, depending on the platform, it may or may not succeed, and
583  * may crash.
584  *
585  * As such:
586  * Wrap all the dma routines and check the dev pointer.
587  *
588  * For simple mappings (which return just a dma address), we'll noop them,
589  * returning a dma address of 0.
590  *
591  * On more complex mappings (dma_map_sg), a pseudo routine fills
592  * in the scatter list, setting all dma addresses to 0.
593  */
594
595 static inline dma_addr_t
596 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
597                 enum dma_data_direction dir)
598 {
599         return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
600 }
601
602 static inline int
603 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
604 {
605         return dev ? dma_mapping_error(dev, dma_addr) : 0;
606 }
607
608 static inline void
609 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
610         enum dma_data_direction dir)
611 {
612         if (dev)
613                 dma_unmap_single(dev, addr, size, dir);
614 }
615
616 static inline void
617 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
618                 enum dma_data_direction dir)
619 {
620         if (dev)
621                 dma_sync_single_for_cpu(dev, addr, size, dir);
622 }
623
624 static inline void
625 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
626                 enum dma_data_direction dir)
627 {
628         if (dev)
629                 dma_sync_single_for_device(dev, addr, size, dir);
630 }
631
632 /* pseudo dma_map_sg call */
633 static int
634 fc_map_sg(struct scatterlist *sg, int nents)
635 {
636         struct scatterlist *s;
637         int i;
638
639         WARN_ON(nents == 0 || sg[0].length == 0);
640
641         for_each_sg(sg, s, nents, i) {
642                 s->dma_address = 0L;
643 #ifdef CONFIG_NEED_SG_DMA_LENGTH
644                 s->dma_length = s->length;
645 #endif
646         }
647         return nents;
648 }
649
650 static inline int
651 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
652                 enum dma_data_direction dir)
653 {
654         return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
655 }
656
657 static inline void
658 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
659                 enum dma_data_direction dir)
660 {
661         if (dev)
662                 dma_unmap_sg(dev, sg, nents, dir);
663 }
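/*
 * Illustrative sketch, not part of the original driver: the wrappers
 * above let the same mapping code serve both real LLDDs and fcloop,
 * which registers with a NULL dma device.  example_map_buffer() is a
 * hypothetical caller; with dev == NULL both calls are no-ops.
 */
#if 0
static int example_map_buffer(struct device *dev, void *buf, size_t len,
		dma_addr_t *dma)
{
	*dma = fc_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (fc_dma_mapping_error(dev, *dma))
		return -EFAULT;
	return 0;
}
#endif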
664
665
666 /* *********************** FC-NVME LS Handling **************************** */
667
668 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
669 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
670
671
672 static void
673 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
674 {
675         struct nvme_fc_rport *rport = lsop->rport;
676         struct nvmefc_ls_req *lsreq = &lsop->ls_req;
677         unsigned long flags;
678
679         spin_lock_irqsave(&rport->lock, flags);
680
681         if (!lsop->req_queued) {
682                 spin_unlock_irqrestore(&rport->lock, flags);
683                 return;
684         }
685
686         list_del(&lsop->lsreq_list);
687
688         lsop->req_queued = false;
689
690         spin_unlock_irqrestore(&rport->lock, flags);
691
692         fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
693                                   (lsreq->rqstlen + lsreq->rsplen),
694                                   DMA_BIDIRECTIONAL);
695
696         nvme_fc_rport_put(rport);
697 }
698
699 static int
700 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
701                 struct nvmefc_ls_req_op *lsop,
702                 void (*done)(struct nvmefc_ls_req *req, int status))
703 {
704         struct nvmefc_ls_req *lsreq = &lsop->ls_req;
705         unsigned long flags;
706         int ret = 0;
707
708         if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
709                 return -ECONNREFUSED;
710
711         if (!nvme_fc_rport_get(rport))
712                 return -ESHUTDOWN;
713
714         lsreq->done = done;
715         lsop->rport = rport;
716         lsop->req_queued = false;
717         INIT_LIST_HEAD(&lsop->lsreq_list);
718         init_completion(&lsop->ls_done);
719
720         lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
721                                   lsreq->rqstlen + lsreq->rsplen,
722                                   DMA_BIDIRECTIONAL);
723         if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
724                 ret = -EFAULT;
725                 goto out_putrport;
726         }
727         lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
728
729         spin_lock_irqsave(&rport->lock, flags);
730
731         list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
732
733         lsop->req_queued = true;
734
735         spin_unlock_irqrestore(&rport->lock, flags);
736
737         ret = rport->lport->ops->ls_req(&rport->lport->localport,
738                                         &rport->remoteport, lsreq);
739         if (ret)
740                 goto out_unlink;
741
742         return 0;
743
744 out_unlink:
745         lsop->ls_error = ret;
746         spin_lock_irqsave(&rport->lock, flags);
747         lsop->req_queued = false;
748         list_del(&lsop->lsreq_list);
749         spin_unlock_irqrestore(&rport->lock, flags);
750         fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
751                                   (lsreq->rqstlen + lsreq->rsplen),
752                                   DMA_BIDIRECTIONAL);
753 out_putrport:
754         nvme_fc_rport_put(rport);
755
756         return ret;
757 }
758
759 static void
760 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
761 {
762         struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
763
764         lsop->ls_error = status;
765         complete(&lsop->ls_done);
766 }
767
768 static int
769 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
770 {
771         struct nvmefc_ls_req *lsreq = &lsop->ls_req;
772         struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
773         int ret;
774
775         ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
776
777         if (!ret) {
778                 /*
779                  * No timeout and not interruptible, as we need the struct
780                  * to exist until the lldd calls us back. Thus mandate the
781                  * wait until the lldd calls back; the lldd is responsible
782                  * for any timeout action.
783                  */
784                 wait_for_completion(&lsop->ls_done);
785
786                 __nvme_fc_finish_ls_req(lsop);
787
788                 ret = lsop->ls_error;
789         }
790
791         if (ret)
792                 return ret;
793
794         /* ACC or RJT payload ? */
795         if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
796                 return -ENXIO;
797
798         return 0;
799 }
800
801 static int
802 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
803                 struct nvmefc_ls_req_op *lsop,
804                 void (*done)(struct nvmefc_ls_req *req, int status))
805 {
806         /* don't wait for completion */
807
808         return __nvme_fc_send_ls_req(rport, lsop, done);
809 }
810
811 /* Validation Error indexes into the string table below */
812 enum {
813         VERR_NO_ERROR           = 0,
814         VERR_LSACC              = 1,
815         VERR_LSDESC_RQST        = 2,
816         VERR_LSDESC_RQST_LEN    = 3,
817         VERR_ASSOC_ID           = 4,
818         VERR_ASSOC_ID_LEN       = 5,
819         VERR_CONN_ID            = 6,
820         VERR_CONN_ID_LEN        = 7,
821         VERR_CR_ASSOC           = 8,
822         VERR_CR_ASSOC_ACC_LEN   = 9,
823         VERR_CR_CONN            = 10,
824         VERR_CR_CONN_ACC_LEN    = 11,
825         VERR_DISCONN            = 12,
826         VERR_DISCONN_ACC_LEN    = 13,
827 };
828
829 static char *validation_errors[] = {
830         "OK",
831         "Not LS_ACC",
832         "Not LSDESC_RQST",
833         "Bad LSDESC_RQST Length",
834         "Not Association ID",
835         "Bad Association ID Length",
836         "Not Connection ID",
837         "Bad Connection ID Length",
838         "Not CR_ASSOC Rqst",
839         "Bad CR_ASSOC ACC Length",
840         "Not CR_CONN Rqst",
841         "Bad CR_CONN ACC Length",
842         "Not Disconnect Rqst",
843         "Bad Disconnect ACC Length",
844 };
845
846 static int
847 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
848         struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
849 {
850         struct nvmefc_ls_req_op *lsop;
851         struct nvmefc_ls_req *lsreq;
852         struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
853         struct fcnvme_ls_cr_assoc_acc *assoc_acc;
854         int ret, fcret = 0;
855
856         lsop = kzalloc((sizeof(*lsop) +
857                          ctrl->lport->ops->lsrqst_priv_sz +
858                          sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
859         if (!lsop) {
860                 ret = -ENOMEM;
861                 goto out_no_memory;
862         }
863         lsreq = &lsop->ls_req;
864
865         lsreq->private = (void *)&lsop[1];
866         assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
867                         (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
868         assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
869
870         assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
871         assoc_rqst->desc_list_len =
872                         cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
873
874         assoc_rqst->assoc_cmd.desc_tag =
875                         cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
876         assoc_rqst->assoc_cmd.desc_len =
877                         fcnvme_lsdesc_len(
878                                 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
879
880         assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
881         assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
882         /* Linux supports only Dynamic controllers */
883         assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
884         memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
885                 min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
886         strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
887                 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
888         strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
889                 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
890
891         lsop->queue = queue;
892         lsreq->rqstaddr = assoc_rqst;
893         lsreq->rqstlen = sizeof(*assoc_rqst);
894         lsreq->rspaddr = assoc_acc;
895         lsreq->rsplen = sizeof(*assoc_acc);
896         lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
897
898         ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
899         if (ret)
900                 goto out_free_buffer;
901
902         /* process connect LS completion */
903
904         /* validate the ACC response */
905         if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
906                 fcret = VERR_LSACC;
907         else if (assoc_acc->hdr.desc_list_len !=
908                         fcnvme_lsdesc_len(
909                                 sizeof(struct fcnvme_ls_cr_assoc_acc)))
910                 fcret = VERR_CR_ASSOC_ACC_LEN;
911         else if (assoc_acc->hdr.rqst.desc_tag !=
912                         cpu_to_be32(FCNVME_LSDESC_RQST))
913                 fcret = VERR_LSDESC_RQST;
914         else if (assoc_acc->hdr.rqst.desc_len !=
915                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
916                 fcret = VERR_LSDESC_RQST_LEN;
917         else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
918                 fcret = VERR_CR_ASSOC;
919         else if (assoc_acc->associd.desc_tag !=
920                         cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
921                 fcret = VERR_ASSOC_ID;
922         else if (assoc_acc->associd.desc_len !=
923                         fcnvme_lsdesc_len(
924                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
925                 fcret = VERR_ASSOC_ID_LEN;
926         else if (assoc_acc->connectid.desc_tag !=
927                         cpu_to_be32(FCNVME_LSDESC_CONN_ID))
928                 fcret = VERR_CONN_ID;
929         else if (assoc_acc->connectid.desc_len !=
930                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
931                 fcret = VERR_CONN_ID_LEN;
932
933         if (fcret) {
934                 ret = -EBADF;
935                 dev_err(ctrl->dev,
936                         "q %d connect failed: %s\n",
937                         queue->qnum, validation_errors[fcret]);
938         } else {
939                 ctrl->association_id =
940                         be64_to_cpu(assoc_acc->associd.association_id);
941                 queue->connection_id =
942                         be64_to_cpu(assoc_acc->connectid.connection_id);
943                 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
944         }
945
946 out_free_buffer:
947         kfree(lsop);
948 out_no_memory:
949         if (ret)
950                 dev_err(ctrl->dev,
951                         "queue %d connect admin queue failed (%d).\n",
952                         queue->qnum, ret);
953         return ret;
954 }
955
956 static int
957 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
958                         u16 qsize, u16 ersp_ratio)
959 {
960         struct nvmefc_ls_req_op *lsop;
961         struct nvmefc_ls_req *lsreq;
962         struct fcnvme_ls_cr_conn_rqst *conn_rqst;
963         struct fcnvme_ls_cr_conn_acc *conn_acc;
964         int ret, fcret = 0;
965
966         lsop = kzalloc((sizeof(*lsop) +
967                          ctrl->lport->ops->lsrqst_priv_sz +
968                          sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
969         if (!lsop) {
970                 ret = -ENOMEM;
971                 goto out_no_memory;
972         }
973         lsreq = &lsop->ls_req;
974
975         lsreq->private = (void *)&lsop[1];
976         conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
977                         (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
978         conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
979
980         conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
981         conn_rqst->desc_list_len = cpu_to_be32(
982                                 sizeof(struct fcnvme_lsdesc_assoc_id) +
983                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
984
985         conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
986         conn_rqst->associd.desc_len =
987                         fcnvme_lsdesc_len(
988                                 sizeof(struct fcnvme_lsdesc_assoc_id));
989         conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
990         conn_rqst->connect_cmd.desc_tag =
991                         cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
992         conn_rqst->connect_cmd.desc_len =
993                         fcnvme_lsdesc_len(
994                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
995         conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
996         conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
997         conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
998
999         lsop->queue = queue;
1000         lsreq->rqstaddr = conn_rqst;
1001         lsreq->rqstlen = sizeof(*conn_rqst);
1002         lsreq->rspaddr = conn_acc;
1003         lsreq->rsplen = sizeof(*conn_acc);
1004         lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1005
1006         ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1007         if (ret)
1008                 goto out_free_buffer;
1009
1010         /* process connect LS completion */
1011
1012         /* validate the ACC response */
1013         if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1014                 fcret = VERR_LSACC;
1015         else if (conn_acc->hdr.desc_list_len !=
1016                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1017                 fcret = VERR_CR_CONN_ACC_LEN;
1018         else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1019                 fcret = VERR_LSDESC_RQST;
1020         else if (conn_acc->hdr.rqst.desc_len !=
1021                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1022                 fcret = VERR_LSDESC_RQST_LEN;
1023         else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1024                 fcret = VERR_CR_CONN;
1025         else if (conn_acc->connectid.desc_tag !=
1026                         cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1027                 fcret = VERR_CONN_ID;
1028         else if (conn_acc->connectid.desc_len !=
1029                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1030                 fcret = VERR_CONN_ID_LEN;
1031
1032         if (fcret) {
1033                 ret = -EBADF;
1034                 dev_err(ctrl->dev,
1035                         "q %d connect failed: %s\n",
1036                         queue->qnum, validation_errors[fcret]);
1037         } else {
1038                 queue->connection_id =
1039                         be64_to_cpu(conn_acc->connectid.connection_id);
1040                 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1041         }
1042
1043 out_free_buffer:
1044         kfree(lsop);
1045 out_no_memory:
1046         if (ret)
1047                 dev_err(ctrl->dev,
1048                         "queue %d connect command failed (%d).\n",
1049                         queue->qnum, ret);
1050         return ret;
1051 }
1052
1053 static void
1054 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1055 {
1056         struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1057
1058         __nvme_fc_finish_ls_req(lsop);
1059
1060         /* fc-nvme initiator doesn't care about success or failure of cmd */
1061
1062         kfree(lsop);
1063 }
1064
1065 /*
1066  * This routine sends an FC-NVME LS to disconnect (aka terminate)
1067  * the FC-NVME Association.  Terminating the association also
1068  * terminates the FC-NVME connections (per queue, both admin and io
1069  * queues) that are part of the association. I.e. things are torn
1070  * down, and the related FC-NVME Association ID and Connection IDs
1071  * become invalid.
1072  *
1073  * The behavior of the fc-nvme initiator is such that its
1074  * understanding of the association and connections will implicitly
1075  * be torn down. The action is implicit as it may be due to a loss of
1076  * connectivity with the fc-nvme target, so you may never get a
1077  * response even if you tried.  As such, the action of this routine
1078  * is to asynchronously send the LS, ignore any results of the LS, and
1079  * continue on with terminating the association. If the fc-nvme target
1080  * is present and receives the LS, it too can tear down.
1081  */
1082 static void
1083 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1084 {
1085         struct fcnvme_ls_disconnect_rqst *discon_rqst;
1086         struct fcnvme_ls_disconnect_acc *discon_acc;
1087         struct nvmefc_ls_req_op *lsop;
1088         struct nvmefc_ls_req *lsreq;
1089         int ret;
1090
1091         lsop = kzalloc((sizeof(*lsop) +
1092                          ctrl->lport->ops->lsrqst_priv_sz +
1093                          sizeof(*discon_rqst) + sizeof(*discon_acc)),
1094                         GFP_KERNEL);
1095         if (!lsop)
1096                 /* couldn't send it... too bad */
1097                 return;
1098
1099         lsreq = &lsop->ls_req;
1100
1101         lsreq->private = (void *)&lsop[1];
1102         discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1103                         (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1104         discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1105
1106         discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1107         discon_rqst->desc_list_len = cpu_to_be32(
1108                                 sizeof(struct fcnvme_lsdesc_assoc_id) +
1109                                 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1110
1111         discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1112         discon_rqst->associd.desc_len =
1113                         fcnvme_lsdesc_len(
1114                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1115
1116         discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1117
1118         discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1119                                                 FCNVME_LSDESC_DISCONN_CMD);
1120         discon_rqst->discon_cmd.desc_len =
1121                         fcnvme_lsdesc_len(
1122                                 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1123         discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1124         discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1125
1126         lsreq->rqstaddr = discon_rqst;
1127         lsreq->rqstlen = sizeof(*discon_rqst);
1128         lsreq->rspaddr = discon_acc;
1129         lsreq->rsplen = sizeof(*discon_acc);
1130         lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1131
1132         ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1133                                 nvme_fc_disconnect_assoc_done);
1134         if (ret)
1135                 kfree(lsop);
1136
1137         /* the only meaningful part of terminating the association */
1138         ctrl->association_id = 0;
1139 }
1140
1141
1142 /* *********************** NVME Ctrl Routines **************************** */
1143
1144 static void __nvme_fc_final_op_cleanup(struct request *rq);
1145
1146 static int
1147 nvme_fc_reinit_request(void *data, struct request *rq)
1148 {
1149         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1150         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1151
1152         memset(cmdiu, 0, sizeof(*cmdiu));
1153         cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1154         cmdiu->fc_id = NVME_CMD_FC_ID;
1155         cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1156         memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1157
1158         return 0;
1159 }
1160
1161 static void
1162 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1163                 struct nvme_fc_fcp_op *op)
1164 {
1165         fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1166                                 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1167         fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1168                                 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1169
1170         atomic_set(&op->state, FCPOP_STATE_UNINIT);
1171 }
1172
1173 static void
1174 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1175                 unsigned int hctx_idx)
1176 {
1177         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1178
1179         return __nvme_fc_exit_request(set->driver_data, op);
1180 }
1181
1182 static int
1183 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1184 {
1185         int state;
1186
1187         state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1188         if (state != FCPOP_STATE_ACTIVE) {
1189                 atomic_set(&op->state, state);
1190                 return -ECANCELED;
1191         }
1192
1193         ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1194                                         &ctrl->rport->remoteport,
1195                                         op->queue->lldd_handle,
1196                                         &op->fcp_req);
1197
1198         return 0;
1199 }
1200
1201 static void
1202 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1203 {
1204         struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1205         unsigned long flags;
1206         int i, ret;
1207
1208         for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1209                 if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
1210                         continue;
1211
1212                 spin_lock_irqsave(&ctrl->lock, flags);
1213                 if (ctrl->flags & FCCTRL_TERMIO) {
1214                         ctrl->iocnt++;
1215                         aen_op->flags |= FCOP_FLAGS_TERMIO;
1216                 }
1217                 spin_unlock_irqrestore(&ctrl->lock, flags);
1218
1219                 ret = __nvme_fc_abort_op(ctrl, aen_op);
1220                 if (ret) {
1221                         /*
1222                          * if __nvme_fc_abort_op failed the io wasn't
1223                          * active. Thus this call path is running in
1224                          * parallel to the io completion. Treat as non-error.
1225                          */
1226
1227                         /* back out the flags/counters */
1228                         spin_lock_irqsave(&ctrl->lock, flags);
1229                         if (ctrl->flags & FCCTRL_TERMIO)
1230                                 ctrl->iocnt--;
1231                         aen_op->flags &= ~FCOP_FLAGS_TERMIO;
1232                         spin_unlock_irqrestore(&ctrl->lock, flags);
1233                         return;
1234                 }
1235         }
1236 }
1237
1238 static inline int
1239 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1240                 struct nvme_fc_fcp_op *op)
1241 {
1242         unsigned long flags;
1243         bool complete_rq = false;
1244
1245         spin_lock_irqsave(&ctrl->lock, flags);
1246         if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1247                 if (ctrl->flags & FCCTRL_TERMIO)
1248                         ctrl->iocnt--;
1249         }
1250         if (op->flags & FCOP_FLAGS_RELEASED)
1251                 complete_rq = true;
1252         else
1253                 op->flags |= FCOP_FLAGS_COMPLETE;
1254         spin_unlock_irqrestore(&ctrl->lock, flags);
1255
1256         return complete_rq;
1257 }
1258
1259 static void
1260 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1261 {
1262         struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1263         struct request *rq = op->rq;
1264         struct nvmefc_fcp_req *freq = &op->fcp_req;
1265         struct nvme_fc_ctrl *ctrl = op->ctrl;
1266         struct nvme_fc_queue *queue = op->queue;
1267         struct nvme_completion *cqe = &op->rsp_iu.cqe;
1268         struct nvme_command *sqe = &op->cmd_iu.sqe;
1269         __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1270         union nvme_result result;
1271         bool complete_rq;
1272
1273         /*
1274          * WARNING:
1275          * The current linux implementation of an nvme controller
1276          * allocates a single tag set for all io queues and sizes
1277          * the io queues to fully hold all possible tags. Thus, the
1278          * implementation does not reference or care about the sqhd
1279          * value as it never needs to use the sqhd/sqtail pointers
1280          * for submission pacing.
1281          *
1282          * This affects the FC-NVME implementation in two ways:
1283          * 1) As the value doesn't matter, we don't need to waste
1284          *    cycles extracting it from ERSPs and stamping it in the
1285          *    cases where the transport fabricates CQEs on successful
1286          *    completions.
1287          * 2) The FC-NVME implementation requires that ERSP
1288          *    completions be delivered to the nvme layer in order
1289          *    relative to the rsn, such that the sqhd value will always
1290          *    be "in order" for the nvme layer. As the nvme layer in
1291          *    linux doesn't care about sqhd, there's no need to return
1292          *    them in order.
1293          *
1294          * Additionally:
1295          * As the core nvme layer in linux currently does not look at
1296          * every field in the cqe - in cases where the FC transport must
1297          * fabricate a CQE, the following fields will not be set as they
1298          * are not referenced:
1299          *      cqe.sqid,  cqe.sqhd,  cqe.command_id
1300          */
1301
1302         fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1303                                 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1304
1305         if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1306                 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1307         else if (freq->status)
1308                 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1309
1310         /*
1311          * For the linux implementation, if we have an unsuccessful
1312          * status, the blk-mq layer can typically be called with the
1313          * non-zero status and the content of the cqe isn't important.
1314          */
1315         if (status)
1316                 goto done;
1317
1318         /*
1319          * command completed successfully relative to the wire
1320          * protocol. However, validate anything received and
1321          * extract the status and result from the cqe (create it
1322          * where necessary).
1323          */
1324
1325         switch (freq->rcv_rsplen) {
1326
1327         case 0:
1328         case NVME_FC_SIZEOF_ZEROS_RSP:
1329                 /*
1330                  * No response payload, or 12 bytes of payload that
1331                  * should all be zeros, is considered successful; the
1332                  * transport fabricates a CQE with no payload.
1333                  */
1334                 if (freq->transferred_length !=
1335                         be32_to_cpu(op->cmd_iu.data_len)) {
1336                         status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1337                         goto done;
1338                 }
1339                 result.u64 = 0;
1340                 break;
1341
1342         case sizeof(struct nvme_fc_ersp_iu):
1343                 /*
1344                  * The ERSP IU contains a full completion with CQE.
1345                  * Validate ERSP IU and look at cqe.
1346                  */
1347                 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1348                                         (freq->rcv_rsplen / 4) ||
1349                              be32_to_cpu(op->rsp_iu.xfrd_len) !=
1350                                         freq->transferred_length ||
1351                              op->rsp_iu.status_code ||
1352                              sqe->common.command_id != cqe->command_id)) {
1353                         status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1354                         goto done;
1355                 }
1356                 result = cqe->result;
1357                 status = cqe->status;
1358                 break;
1359
1360         default:
1361                 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1362                 goto done;
1363         }
1364
1365 done:
1366         if (op->flags & FCOP_FLAGS_AEN) {
1367                 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1368                 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1369                 atomic_set(&op->state, FCPOP_STATE_IDLE);
1370                 op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
1371                 nvme_fc_ctrl_put(ctrl);
1372                 return;
1373         }
1374
1375         complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1376         if (!complete_rq) {
1377                 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1378                         status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
1379                         if (blk_queue_dying(rq->q))
1380                                 status |= cpu_to_le16(NVME_SC_DNR << 1);
1381                 }
1382                 nvme_end_request(rq, status, result);
1383         } else
1384                 __nvme_fc_final_op_cleanup(rq);
1385 }
1386
1387 static int
1388 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1389                 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1390                 struct request *rq, u32 rqno)
1391 {
1392         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1393         int ret = 0;
1394
1395         memset(op, 0, sizeof(*op));
1396         op->fcp_req.cmdaddr = &op->cmd_iu;
1397         op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1398         op->fcp_req.rspaddr = &op->rsp_iu;
1399         op->fcp_req.rsplen = sizeof(op->rsp_iu);
1400         op->fcp_req.done = nvme_fc_fcpio_done;
1401         op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1402         op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1403         op->ctrl = ctrl;
1404         op->queue = queue;
1405         op->rq = rq;
1406         op->rqno = rqno;
1407
1408         cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1409         cmdiu->fc_id = NVME_CMD_FC_ID;
1410         cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1411
1412         op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1413                                 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1414         if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1415                 dev_err(ctrl->dev,
1416                         "FCP Op failed - cmdiu dma mapping failed.\n");
1417                 ret = -EFAULT;
1418                 goto out_on_error;
1419         }
1420
1421         op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1422                                 &op->rsp_iu, sizeof(op->rsp_iu),
1423                                 DMA_FROM_DEVICE);
1424         if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1425                 dev_err(ctrl->dev,
1426                         "FCP Op failed - rspiu dma mapping failed.\n");
1427                 ret = -EFAULT;
1428         }
1429
1430         atomic_set(&op->state, FCPOP_STATE_IDLE);
1431 out_on_error:
1432         return ret;
1433 }
1434
1435 static int
1436 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1437                 unsigned int hctx_idx, unsigned int numa_node)
1438 {
1439         struct nvme_fc_ctrl *ctrl = set->driver_data;
1440         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1441         struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
1442
1443         return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1444 }
1445
1446 static int
1447 nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
1448                 unsigned int hctx_idx, unsigned int numa_node)
1449 {
1450         struct nvme_fc_ctrl *ctrl = set->driver_data;
1451         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1452         struct nvme_fc_queue *queue = &ctrl->queues[0];
1453
1454         return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1455 }
1456
1457 static int
1458 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1459 {
1460         struct nvme_fc_fcp_op *aen_op;
1461         struct nvme_fc_cmd_iu *cmdiu;
1462         struct nvme_command *sqe;
1463         void *private;
1464         int i, ret;
1465
1466         aen_op = ctrl->aen_ops;
1467         for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1468                 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1469                                                 GFP_KERNEL);
1470                 if (!private)
1471                         return -ENOMEM;
1472
1473                 cmdiu = &aen_op->cmd_iu;
1474                 sqe = &cmdiu->sqe;
1475                 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1476                                 aen_op, (struct request *)NULL,
1477                                 (AEN_CMDID_BASE + i));
1478                 if (ret) {
1479                         kfree(private);
1480                         return ret;
1481                 }
1482
1483                 aen_op->flags = FCOP_FLAGS_AEN;
1484                 aen_op->fcp_req.first_sgl = NULL; /* no sg list */
1485                 aen_op->fcp_req.private = private;
1486
1487                 memset(sqe, 0, sizeof(*sqe));
1488                 sqe->common.opcode = nvme_admin_async_event;
1489                 /* Note: core layer may overwrite the sqe.command_id value */
1490                 sqe->common.command_id = AEN_CMDID_BASE + i;
1491         }
1492         return 0;
1493 }
1494
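     /*
      * Tear down the AEN ops: release what __nvme_fc_init_request() set
      * up and free the manually allocated LLDD private area.
      */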
1495 static void
1496 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1497 {
1498         struct nvme_fc_fcp_op *aen_op;
1499         int i;
1500
1501         aen_op = ctrl->aen_ops;
1502         for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1503                 if (!aen_op->fcp_req.private)
1504                         continue;
1505
1506                 __nvme_fc_exit_request(ctrl, aen_op);
1507
1508                 kfree(aen_op->fcp_req.private);
1509                 aen_op->fcp_req.private = NULL;
1510         }
1511 }
1512
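     /*
      * __nvme_fc_init_hctx() and its wrappers bind a blk-mq hardware
      * context and its transport queue to each other; the admin variant
      * uses queue 0, io hctxs map to queues[hctx_idx + 1].
      */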
1513 static inline void
1514 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1515                 unsigned int qidx)
1516 {
1517         struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1518
1519         hctx->driver_data = queue;
1520         queue->hctx = hctx;
1521 }
1522
1523 static int
1524 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1525                 unsigned int hctx_idx)
1526 {
1527         struct nvme_fc_ctrl *ctrl = data;
1528
1529         __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1530
1531         return 0;
1532 }
1533
1534 static int
1535 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1536                 unsigned int hctx_idx)
1537 {
1538         struct nvme_fc_ctrl *ctrl = data;
1539
1540         __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1541
1542         return 0;
1543 }
1544
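     /*
      * (Re)initialize the software state of a queue: seed the command
      * sequence number at 1 and size the command capsule from ioccsz
      * for io queues, or as a bare SQE for the admin queue.
      */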
1545 static void
1546 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
1547 {
1548         struct nvme_fc_queue *queue;
1549
1550         queue = &ctrl->queues[idx];
1551         memset(queue, 0, sizeof(*queue));
1552         queue->ctrl = ctrl;
1553         queue->qnum = idx;
1554         atomic_set(&queue->csn, 1);
1555         queue->dev = ctrl->dev;
1556
1557         if (idx > 0)
1558                 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1559         else
1560                 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1561
1562         queue->queue_size = queue_size;
1563
1564         /*
1565          * We considered whether to allocate buffers for all SQEs
1566          * and CQEs and dma map them - recording their respective entries
1567          * in the request structures (kernel vm addr and dma address)
1568          * so the driver could use the buffers/mappings directly.
1569          * That only makes sense if the LLDD would use them for its
1570          * messaging api. It's very unlikely most adapter APIs would use
1571          * a native NVME sqe/cqe; it would be more reasonable if FC-NVME
1572          * IU payload structures were used instead.
1573          */
1574 }
1575
1576 /*
1577  * This routine terminates a queue at the transport level.
1578  * The transport has already ensured that all outstanding ios on
1579  * the queue have been terminated.
1580  * The transport will send a Disconnect LS request to terminate
1581  * the queue's connection. Termination of the admin queue will also
1582  * terminate the association at the target.
1583  */
1584 static void
1585 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1586 {
1587         if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1588                 return;
1589
1590         /*
1591          * Current implementation never disconnects a single queue.
1592          * It always terminates a whole association. So there is never
1593          * a disconnect(queue) LS sent to the target.
1594          */
1595
1596         queue->connection_id = 0;
1597         clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1598 }
1599
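     /*
      * create_queue/delete_queue are optional LLDD callbacks that let
      * the driver bind a hardware queue context (lldd_handle) to each
      * transport queue; if not provided, lldd_handle simply stays NULL.
      */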
1600 static void
1601 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1602         struct nvme_fc_queue *queue, unsigned int qidx)
1603 {
1604         if (ctrl->lport->ops->delete_queue)
1605                 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1606                                 queue->lldd_handle);
1607         queue->lldd_handle = NULL;
1608 }
1609
1610 static void
1611 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1612 {
1613         int i;
1614
1615         for (i = 1; i < ctrl->queue_count; i++)
1616                 nvme_fc_free_queue(&ctrl->queues[i]);
1617 }
1618
1619 static int
1620 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1621         struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1622 {
1623         int ret = 0;
1624
1625         queue->lldd_handle = NULL;
1626         if (ctrl->lport->ops->create_queue)
1627                 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1628                                 qidx, qsize, &queue->lldd_handle);
1629
1630         return ret;
1631 }
1632
1633 static void
1634 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1635 {
1636         struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
1637         int i;
1638
1639         for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
1640                 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1641 }
1642
1643 static int
1644 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1645 {
1646         struct nvme_fc_queue *queue = &ctrl->queues[1];
1647         int i, ret;
1648
1649         for (i = 1; i < ctrl->queue_count; i++, queue++) {
1650                 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1651                 if (ret)
1652                         goto delete_queues;
1653         }
1654
1655         return 0;
1656
1657 delete_queues:
1658         for (; i >= 0; i--)
1659                 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1660         return ret;
1661 }
1662
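     /*
      * For each io queue: establish the transport-level connection to
      * the target (nvme_fc_connect_queue) and then issue the fabrics
      * Connect command on it (nvmf_connect_io_queue).
      */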
1663 static int
1664 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1665 {
1666         int i, ret = 0;
1667
1668         for (i = 1; i < ctrl->queue_count; i++) {
1669                 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1670                                         (qsize / 5));
1671                 if (ret)
1672                         break;
1673                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1674                 if (ret)
1675                         break;
1676         }
1677
1678         return ret;
1679 }
1680
1681 static void
1682 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1683 {
1684         int i;
1685
1686         for (i = 1; i < ctrl->queue_count; i++)
1687                 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1688 }
1689
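     /*
      * Final kref release for the controller: free the io tag set and
      * connect_q if they were created, unlink from the rport's ctrl
      * list, free the admin queue/tag set and the queue array, put the
      * device and rport references, and release the controller number,
      * the options and the ctrl itself.
      */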
1690 static void
1691 nvme_fc_ctrl_free(struct kref *ref)
1692 {
1693         struct nvme_fc_ctrl *ctrl =
1694                 container_of(ref, struct nvme_fc_ctrl, ref);
1695         unsigned long flags;
1696
1697         if (ctrl->ctrl.tagset) {
1698                 blk_cleanup_queue(ctrl->ctrl.connect_q);
1699                 blk_mq_free_tag_set(&ctrl->tag_set);
1700         }
1701
1702         /* remove from rport list */
1703         spin_lock_irqsave(&ctrl->rport->lock, flags);
1704         list_del(&ctrl->ctrl_list);
1705         spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1706
1707         blk_cleanup_queue(ctrl->ctrl.admin_q);
1708         blk_mq_free_tag_set(&ctrl->admin_tag_set);
1709
1710         kfree(ctrl->queues);
1711
1712         put_device(ctrl->dev);
1713         nvme_fc_rport_put(ctrl->rport);
1714
1715         ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
1716         if (ctrl->ctrl.opts)
1717                 nvmf_free_options(ctrl->ctrl.opts);
1718         kfree(ctrl);
1719 }
1720
1721 static void
1722 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1723 {
1724         kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1725 }
1726
1727 static int
1728 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1729 {
1730         return kref_get_unless_zero(&ctrl->ref);
1731 }
1732
1733 /*
1734  * All accesses from nvme core layer done - can now free the
1735  * controller. Called after last nvme_put_ctrl() call
1736  */
1737 static void
1738 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
1739 {
1740         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1741
1742         WARN_ON(nctrl != &ctrl->ctrl);
1743
1744         nvme_fc_ctrl_put(ctrl);
1745 }
1746
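     /*
      * Transport-detected association error: stop the io queues, move
      * the controller to RECONNECTING and schedule reset_work; the
      * actual io cleanup happens in the reset path.
      */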
1747 static void
1748 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1749 {
1750         dev_warn(ctrl->ctrl.device,
1751                 "NVME-FC{%d}: transport association error detected: %s\n",
1752                 ctrl->cnum, errmsg);
1753         dev_info(ctrl->ctrl.device,
1754                 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
1755
1756         /* stop the queues on error, cleanup is in reset thread */
1757         if (ctrl->queue_count > 1)
1758                 nvme_stop_queues(&ctrl->ctrl);
1759
1760         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1761                 dev_err(ctrl->ctrl.device,
1762                         "NVME-FC{%d}: error_recovery: Couldn't change state "
1763                         "to RECONNECTING\n", ctrl->cnum);
1764                 return;
1765         }
1766
1767         if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
1768                 dev_err(ctrl->ctrl.device,
1769                         "NVME-FC{%d}: error_recovery: Failed to schedule "
1770                         "reset work\n", ctrl->cnum);
1771 }
1772
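     /*
      * blk-mq .timeout handler: try to abort the op. If the abort fails
      * the io was no longer active and is treated as done; otherwise
      * escalate to a controller reset (see the comment below for why a
      * single io cannot be aborted in isolation).
      */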
1773 static enum blk_eh_timer_return
1774 nvme_fc_timeout(struct request *rq, bool reserved)
1775 {
1776         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1777         struct nvme_fc_ctrl *ctrl = op->ctrl;
1778         int ret;
1779
1780         if (reserved)
1781                 return BLK_EH_RESET_TIMER;
1782
1783         ret = __nvme_fc_abort_op(ctrl, op);
1784         if (ret)
1785                 /* io wasn't active to abort; consider it done */
1786                 return BLK_EH_HANDLED;
1787
1788         /*
1789          * we can't individually ABTS an io without affecting the queue,
1790          * thus killing the queue, and thus the association.
1791          * So resolve by performing a controller reset, which will stop
1792          * the host/io stack, terminate the association on the link,
1793          * and recreate an association on the link.
1794          */
1795         nvme_fc_error_recovery(ctrl, "io timeout error");
1796
1797         return BLK_EH_HANDLED;
1798 }
1799
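     /*
      * Build a chained scatterlist for the request payload, rooted in
      * the first_sgl area reserved in the request PDU, and DMA map it
      * in the data direction; nvme_fc_unmap_data() below reverses this.
      */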
1800 static int
1801 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1802                 struct nvme_fc_fcp_op *op)
1803 {
1804         struct nvmefc_fcp_req *freq = &op->fcp_req;
1805         enum dma_data_direction dir;
1806         int ret;
1807
1808         freq->sg_cnt = 0;
1809
1810         if (!blk_rq_payload_bytes(rq))
1811                 return 0;
1812
1813         freq->sg_table.sgl = freq->first_sgl;
1814         ret = sg_alloc_table_chained(&freq->sg_table,
1815                         blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1816         if (ret)
1817                 return -ENOMEM;
1818
1819         op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1820         WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1821         dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1822         freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1823                                 op->nents, dir);
1824         if (unlikely(freq->sg_cnt <= 0)) {
1825                 sg_free_table_chained(&freq->sg_table, true);
1826                 freq->sg_cnt = 0;
1827                 return -EFAULT;
1828         }
1829
1830         /*
1831          * TODO: blk_integrity_rq(rq)  for DIF
1832          */
1833         return 0;
1834 }
1835
1836 static void
1837 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1838                 struct nvme_fc_fcp_op *op)
1839 {
1840         struct nvmefc_fcp_req *freq = &op->fcp_req;
1841
1842         if (!freq->sg_cnt)
1843                 return;
1844
1845         fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1846                                 ((rq_data_dir(rq) == WRITE) ?
1847                                         DMA_TO_DEVICE : DMA_FROM_DEVICE));
1848
1849         nvme_cleanup_cmd(rq);
1850
1851         sg_free_table_chained(&freq->sg_table, true);
1852
1853         freq->sg_cnt = 0;
1854 }
1855
1856 /*
1857  * In FC, the queue is a logical thing. At transport connect, the target
1858  * creates its "queue" and returns a handle that is to be given to the
1859  * target whenever the host posts something to the corresponding SQ.  When an
1860  * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
1861  * command contained within the SQE, an io, and assigns a FC exchange
1862  * to it. The SQE and the associated SQ handle are sent in the initial
1863  * CMD IU sent on the exchange. All transfers relative to the io occur
1864  * as part of the exchange.  The CQE is the last thing for the io,
1865  * which is transferred (explicitly or implicitly) with the RSP IU
1866  * sent on the exchange. After the CQE is received, the FC exchange is
1867  * terminated and the exchange may be used on a different io.
1868  *
1869  * The transport to LLDD api has the transport making a request for a
1870  * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
1871  * resource and transfers the command. The LLDD will then process all
1872  * steps to complete the io. Upon completion, the transport done routine
1873  * is called.
1874  *
1875  * So - while the operation is outstanding to the LLDD, there is a link
1876  * level FC exchange resource that is also outstanding. This must be
1877  * considered in all cleanup operations.
1878  */
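     /*
      * The resulting io flow for a normal (non-AEN) request is roughly:
      *   nvme_fc_queue_rq() -> nvme_fc_start_fcp_op() ->
      *   lport->ops->fcp_io() ... LLDD runs the exchange ... LLDD calls
      *   fcp_req->done (nvme_fc_fcpio_done) -> blk-mq completion ->
      *   nvme_fc_complete_rq() / __nvme_fc_final_op_cleanup().
      */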
1879 static int
1880 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1881         struct nvme_fc_fcp_op *op, u32 data_len,
1882         enum nvmefc_fcp_datadir io_dir)
1883 {
1884         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1885         struct nvme_command *sqe = &cmdiu->sqe;
1886         u32 csn;
1887         int ret;
1888
1889         /*
1890          * before attempting to send the io, check to see if we believe
1891          * the target device is present
1892          */
1893         if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1894                 return BLK_MQ_RQ_QUEUE_ERROR;
1895
1896         if (!nvme_fc_ctrl_get(ctrl))
1897                 return BLK_MQ_RQ_QUEUE_ERROR;
1898
1899         /* format the FC-NVME CMD IU and fcp_req */
1900         cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1901         csn = atomic_inc_return(&queue->csn);
1902         cmdiu->csn = cpu_to_be32(csn);
1903         cmdiu->data_len = cpu_to_be32(data_len);
1904         switch (io_dir) {
1905         case NVMEFC_FCP_WRITE:
1906                 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1907                 break;
1908         case NVMEFC_FCP_READ:
1909                 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1910                 break;
1911         case NVMEFC_FCP_NODATA:
1912                 cmdiu->flags = 0;
1913                 break;
1914         }
1915         op->fcp_req.payload_length = data_len;
1916         op->fcp_req.io_dir = io_dir;
1917         op->fcp_req.transferred_length = 0;
1918         op->fcp_req.rcv_rsplen = 0;
1919         op->fcp_req.status = NVME_SC_SUCCESS;
1920         op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1921
1922         /*
1923          * validate per fabric rules, set fields mandated by fabric spec
1924          * as well as those by FC-NVME spec.
1925          */
1926         WARN_ON_ONCE(sqe->common.metadata);
1927         WARN_ON_ONCE(sqe->common.dptr.prp1);
1928         WARN_ON_ONCE(sqe->common.dptr.prp2);
1929         sqe->common.flags |= NVME_CMD_SGL_METABUF;
1930
1931         /*
1932          * format SQE DPTR field per FC-NVME rules
1933          *    type=data block descr; subtype=offset;
1934          *    offset is currently 0.
1935          */
1936         sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1937         sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1938         sqe->rw.dptr.sgl.addr = 0;
1939
1940         if (!(op->flags & FCOP_FLAGS_AEN)) {
1941                 ret = nvme_fc_map_data(ctrl, op->rq, op);
1942                 if (ret < 0) {
1943                         nvme_cleanup_cmd(op->rq);
1944                         nvme_fc_ctrl_put(ctrl);
1945                         return (ret == -ENOMEM || ret == -EAGAIN) ?
1946                                 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1947                 }
1948         }
1949
1950         fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1951                                   sizeof(op->cmd_iu), DMA_TO_DEVICE);
1952
1953         atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1954
1955         if (!(op->flags & FCOP_FLAGS_AEN))
1956                 blk_mq_start_request(op->rq);
1957
1958         ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1959                                         &ctrl->rport->remoteport,
1960                                         queue->lldd_handle, &op->fcp_req);
1961
1962         if (ret) {
1963                 if (op->rq) {                   /* normal request */
1964                         nvme_fc_unmap_data(ctrl, op->rq, op);
1965                         nvme_cleanup_cmd(op->rq);
1966                 }
1967                 /* else - aen. no cleanup needed */
1968
1969                 nvme_fc_ctrl_put(ctrl);
1970
1971                 if (ret != -EBUSY)
1972                         return BLK_MQ_RQ_QUEUE_ERROR;
1973
1974                 if (op->rq) {
1975                         blk_mq_stop_hw_queues(op->rq->q);
1976                         blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1977                 }
1978                 return BLK_MQ_RQ_QUEUE_BUSY;
1979         }
1980
1981         return BLK_MQ_RQ_QUEUE_OK;
1982 }
1983
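     /*
      * blk-mq .queue_rq: build the SQE via nvme_setup_cmd(), derive the
      * payload length and data direction, and kick off the FCP op.
      */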
1984 static int
1985 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1986                         const struct blk_mq_queue_data *bd)
1987 {
1988         struct nvme_ns *ns = hctx->queue->queuedata;
1989         struct nvme_fc_queue *queue = hctx->driver_data;
1990         struct nvme_fc_ctrl *ctrl = queue->ctrl;
1991         struct request *rq = bd->rq;
1992         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1993         struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1994         struct nvme_command *sqe = &cmdiu->sqe;
1995         enum nvmefc_fcp_datadir io_dir;
1996         u32 data_len;
1997         int ret;
1998
1999         ret = nvme_setup_cmd(ns, rq, sqe);
2000         if (ret)
2001                 return ret;
2002
2003         data_len = blk_rq_payload_bytes(rq);
2004         if (data_len)
2005                 io_dir = ((rq_data_dir(rq) == WRITE) ?
2006                                         NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2007         else
2008                 io_dir = NVMEFC_FCP_NODATA;
2009
2010         return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2011 }
2012
2013 static struct blk_mq_tags *
2014 nvme_fc_tagset(struct nvme_fc_queue *queue)
2015 {
2016         if (queue->qnum == 0)
2017                 return queue->ctrl->admin_tag_set.tags[queue->qnum];
2018
2019         return queue->ctrl->tag_set.tags[queue->qnum - 1];
2020 }
2021
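     /*
      * blk-mq .poll: if the op for this tag is still active, give the
      * LLDD a chance to poll its queue (optional hook) and report
      * whether the op has since completed.
      */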
2022 static int
2023 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2024
2025 {
2026         struct nvme_fc_queue *queue = hctx->driver_data;
2027         struct nvme_fc_ctrl *ctrl = queue->ctrl;
2028         struct request *req;
2029         struct nvme_fc_fcp_op *op;
2030
2031         req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2032         if (!req)
2033                 return 0;
2034
2035         op = blk_mq_rq_to_pdu(req);
2036
2037         if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2038                  (ctrl->lport->ops->poll_queue))
2039                 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2040                                                  queue->lldd_handle);
2041
2042         return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2043 }
2044
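     /*
      * Send one of the reserved AEN ops on the admin queue. Skipped if
      * controller teardown (FCCTRL_TERMIO) is in progress.
      */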
2045 static void
2046 nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2047 {
2048         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2049         struct nvme_fc_fcp_op *aen_op;
2050         unsigned long flags;
2051         bool terminating = false;
2052         int ret;
2053
2054         if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
2055                 return;
2056
2057         spin_lock_irqsave(&ctrl->lock, flags);
2058         if (ctrl->flags & FCCTRL_TERMIO)
2059                 terminating = true;
2060         spin_unlock_irqrestore(&ctrl->lock, flags);
2061
2062         if (terminating)
2063                 return;
2064
2065         aen_op = &ctrl->aen_ops[aer_idx];
2066
2067         ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2068                                         NVMEFC_FCP_NODATA);
2069         if (ret)
2070                 dev_err(ctrl->ctrl.device,
2071                         "failed async event work [%d]\n", aer_idx);
2072 }
2073
2074 static void
2075 __nvme_fc_final_op_cleanup(struct request *rq)
2076 {
2077         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2078         struct nvme_fc_ctrl *ctrl = op->ctrl;
2079
2080         atomic_set(&op->state, FCPOP_STATE_IDLE);
2081         op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
2082                         FCOP_FLAGS_COMPLETE);
2083
2084         nvme_cleanup_cmd(rq);
2085         nvme_fc_unmap_data(ctrl, rq, op);
2086         nvme_complete_rq(rq);
2087         nvme_fc_ctrl_put(ctrl);
2088
2089 }
2090
2091 static void
2092 nvme_fc_complete_rq(struct request *rq)
2093 {
2094         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2095         struct nvme_fc_ctrl *ctrl = op->ctrl;
2096         unsigned long flags;
2097         bool completed = false;
2098
2099         /*
2100          * the core layer, on controller resets after calling
2101          * nvme_shutdown_ctrl(), calls complete_rq without our
2102          * calling blk_mq_complete_request(), thus there may still
2103          * be live i/o outstanding with the LLDD. This means the transport
2104          * has to track complete calls vs fcpio_done calls to know what
2105          * path to take on completes and dones.
2106          */
2107         spin_lock_irqsave(&ctrl->lock, flags);
2108         if (op->flags & FCOP_FLAGS_COMPLETE)
2109                 completed = true;
2110         else
2111                 op->flags |= FCOP_FLAGS_RELEASED;
2112         spin_unlock_irqrestore(&ctrl->lock, flags);
2113
2114         if (completed)
2115                 __nvme_fc_final_op_cleanup(rq);
2116 }
2117
2118 /*
2119  * This routine is used by the transport when it needs to find active
2120  * io on a queue that is to be terminated. The transport uses
2121  * blk_mq_tagset_busy_iter() to find the busy requests, which then
2122  * invokes this routine on each of them to kill them one by one.
2123  *
2124  * As FC allocates FC exchange for each io, the transport must contact
2125  * the LLDD to terminate the exchange, thus releasing the FC exchange.
2126  * After terminating the exchange the LLDD will call the transport's
2127  * normal io done path for the request, but it will have an aborted
2128  * status. The done path will return the io request back to the block
2129  * layer with an error status.
2130  */
2131 static void
2132 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2133 {
2134         struct nvme_ctrl *nctrl = data;
2135         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2136         struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2137         unsigned long flags;
2138         int status;
2139
2140         if (!blk_mq_request_started(req))
2141                 return;
2142
2143         spin_lock_irqsave(&ctrl->lock, flags);
2144         if (ctrl->flags & FCCTRL_TERMIO) {
2145                 ctrl->iocnt++;
2146                 op->flags |= FCOP_FLAGS_TERMIO;
2147         }
2148         spin_unlock_irqrestore(&ctrl->lock, flags);
2149
2150         status = __nvme_fc_abort_op(ctrl, op);
2151         if (status) {
2152                 /*
2153                  * if __nvme_fc_abort_op failed the io wasn't
2154                  * active. Thus this call path is running in
2155                  * parallel to the io complete. Treat as non-error.
2156                  */
2157
2158                 /* back out the flags/counters */
2159                 spin_lock_irqsave(&ctrl->lock, flags);
2160                 if (ctrl->flags & FCCTRL_TERMIO)
2161                         ctrl->iocnt--;
2162                 op->flags &= ~FCOP_FLAGS_TERMIO;
2163                 spin_unlock_irqrestore(&ctrl->lock, flags);
2164                 return;
2165         }
2166 }
2167
2168
2169 static const struct blk_mq_ops nvme_fc_mq_ops = {
2170         .queue_rq       = nvme_fc_queue_rq,
2171         .complete       = nvme_fc_complete_rq,
2172         .init_request   = nvme_fc_init_request,
2173         .exit_request   = nvme_fc_exit_request,
2174         .reinit_request = nvme_fc_reinit_request,
2175         .init_hctx      = nvme_fc_init_hctx,
2176         .poll           = nvme_fc_poll,
2177         .timeout        = nvme_fc_timeout,
2178 };
2179
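     /*
      * First-time io queue bring-up: negotiate the queue count with the
      * controller, allocate the io tag set (each request PDU holds the
      * fcp op, SG_CHUNK_SIZE scatterlist entries and the LLDD private
      * area), create connect_q, then create and connect the hw queues.
      */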
2180 static int
2181 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2182 {
2183         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2184         int ret;
2185
2186         ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2187         if (ret) {
2188                 dev_info(ctrl->ctrl.device,
2189                         "set_queue_count failed: %d\n", ret);
2190                 return ret;
2191         }
2192
2193         ctrl->queue_count = opts->nr_io_queues + 1;
2194         if (!opts->nr_io_queues)
2195                 return 0;
2196
2197         dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2198                         opts->nr_io_queues);
2199
2200         nvme_fc_init_io_queues(ctrl);
2201
2202         memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2203         ctrl->tag_set.ops = &nvme_fc_mq_ops;
2204         ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2205         ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2206         ctrl->tag_set.numa_node = NUMA_NO_NODE;
2207         ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2208         ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2209                                         (SG_CHUNK_SIZE *
2210                                                 sizeof(struct scatterlist)) +
2211                                         ctrl->lport->ops->fcprqst_priv_sz;
2212         ctrl->tag_set.driver_data = ctrl;
2213         ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2214         ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2215
2216         ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2217         if (ret)
2218                 return ret;
2219
2220         ctrl->ctrl.tagset = &ctrl->tag_set;
2221
2222         ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2223         if (IS_ERR(ctrl->ctrl.connect_q)) {
2224                 ret = PTR_ERR(ctrl->ctrl.connect_q);
2225                 goto out_free_tag_set;
2226         }
2227
2228         ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2229         if (ret)
2230                 goto out_cleanup_blk_queue;
2231
2232         ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2233         if (ret)
2234                 goto out_delete_hw_queues;
2235
2236         return 0;
2237
2238 out_delete_hw_queues:
2239         nvme_fc_delete_hw_io_queues(ctrl);
2240 out_cleanup_blk_queue:
2241         nvme_stop_keep_alive(&ctrl->ctrl);
2242         blk_cleanup_queue(ctrl->ctrl.connect_q);
2243 out_free_tag_set:
2244         blk_mq_free_tag_set(&ctrl->tag_set);
2245         nvme_fc_free_io_queues(ctrl);
2246
2247         /* force the put/free routine to ignore io queues */
2248         ctrl->ctrl.tagset = NULL;
2249
2250         return ret;
2251 }
2252
2253 static int
2254 nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2255 {
2256         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2257         int ret;
2258
2259         ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2260         if (ret) {
2261                 dev_info(ctrl->ctrl.device,
2262                         "set_queue_count failed: %d\n", ret);
2263                 return ret;
2264         }
2265
2266         /* check whether io queues exist */
2267         if (ctrl->queue_count == 1)
2268                 return 0;
2269
2270         dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
2271                         opts->nr_io_queues);
2272
2273         nvme_fc_init_io_queues(ctrl);
2274
2275         ret = blk_mq_reinit_tagset(&ctrl->tag_set);
2276         if (ret)
2277                 goto out_free_io_queues;
2278
2279         ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2280         if (ret)
2281                 goto out_free_io_queues;
2282
2283         ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2284         if (ret)
2285                 goto out_delete_hw_queues;
2286
2287         return 0;
2288
2289 out_delete_hw_queues:
2290         nvme_fc_delete_hw_io_queues(ctrl);
2291 out_free_io_queues:
2292         nvme_fc_free_io_queues(ctrl);
2293         return ret;
2294 }
2295
2296 /*
2297  * This routine restarts the controller on the host side, and
2298  * on the link side, recreates the controller association.
2299  */
2300 static int
2301 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2302 {
2303         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2304         u32 segs;
2305         int ret;
2306         bool changed;
2307
2308         ctrl->connect_attempts++;
2309
2310         /*
2311          * Create the admin queue
2312          */
2313
2314         nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
2315
2316         ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2317                                 NVME_FC_AQ_BLKMQ_DEPTH);
2318         if (ret)
2319                 goto out_free_queue;
2320
2321         ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2322                                 NVME_FC_AQ_BLKMQ_DEPTH,
2323                                 (NVME_FC_AQ_BLKMQ_DEPTH / 4));
2324         if (ret)
2325                 goto out_delete_hw_queue;
2326
2327         if (ctrl->ctrl.state != NVME_CTRL_NEW)
2328                 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
2329
2330         ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2331         if (ret)
2332                 goto out_disconnect_admin_queue;
2333
2334         /*
2335          * Check controller capabilities
2336          *
2337          * todo:- add code to check if ctrl attributes changed from
2338          * prior connection values
2339          */
2340
2341         ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2342         if (ret) {
2343                 dev_err(ctrl->ctrl.device,
2344                         "prop_get NVME_REG_CAP failed\n");
2345                 goto out_disconnect_admin_queue;
2346         }
2347
2348         ctrl->ctrl.sqsize =
2349                 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2350
2351         ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2352         if (ret)
2353                 goto out_disconnect_admin_queue;
2354
2355         segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2356                         ctrl->lport->ops->max_sgl_segments);
2357         ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2358
2359         ret = nvme_init_identify(&ctrl->ctrl);
2360         if (ret)
2361                 goto out_disconnect_admin_queue;
2362
2363         /* sanity checks */
2364
2365         /* FC-NVME does not have other data in the capsule */
2366         if (ctrl->ctrl.icdoff) {
2367                 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2368                                 ctrl->ctrl.icdoff);
2369                 goto out_disconnect_admin_queue;
2370         }
2371
2372         nvme_start_keep_alive(&ctrl->ctrl);
2373
2374         /* FC-NVME supports normal SGL Data Block Descriptors */
2375
2376         if (opts->queue_size > ctrl->ctrl.maxcmd) {
2377                 /* warn if maxcmd is lower than queue_size */
2378                 dev_warn(ctrl->ctrl.device,
2379                         "queue_size %zu > ctrl maxcmd %u, reducing "
2380                         "to maxcmd\n",
2381                         opts->queue_size, ctrl->ctrl.maxcmd);
2382                 opts->queue_size = ctrl->ctrl.maxcmd;
2383         }
2384
2385         ret = nvme_fc_init_aen_ops(ctrl);
2386         if (ret)
2387                 goto out_term_aen_ops;
2388
2389         /*
2390          * Create the io queues
2391          */
2392
2393         if (ctrl->queue_count > 1) {
2394                 if (ctrl->ctrl.state == NVME_CTRL_NEW)
2395                         ret = nvme_fc_create_io_queues(ctrl);
2396                 else
2397                         ret = nvme_fc_reinit_io_queues(ctrl);
2398                 if (ret)
2399                         goto out_term_aen_ops;
2400         }
2401
2402         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2403         WARN_ON_ONCE(!changed);
2404
2405         ctrl->connect_attempts = 0;
2406
2407         kref_get(&ctrl->ctrl.kref);
2408
2409         if (ctrl->queue_count > 1) {
2410                 nvme_start_queues(&ctrl->ctrl);
2411                 nvme_queue_scan(&ctrl->ctrl);
2412                 nvme_queue_async_events(&ctrl->ctrl);
2413         }
2414
2415         return 0;       /* Success */
2416
2417 out_term_aen_ops:
2418         nvme_fc_term_aen_ops(ctrl);
2419         nvme_stop_keep_alive(&ctrl->ctrl);
2420 out_disconnect_admin_queue:
2421         /* send a Disconnect(association) LS to fc-nvme target */
2422         nvme_fc_xmt_disconnect_assoc(ctrl);
2423 out_delete_hw_queue:
2424         __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2425 out_free_queue:
2426         nvme_fc_free_queue(&ctrl->queues[0]);
2427
2428         return ret;
2429 }
2430
2431 /*
2432  * This routine stops operation of the controller on the host side.
2433  * On the host os stack side: Admin and IO queues are stopped,
2434  *   outstanding ios on them terminated via FC ABTS.
2435  * On the link side: the association is terminated.
2436  */
2437 static void
2438 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2439 {
2440         unsigned long flags;
2441
2442         nvme_stop_keep_alive(&ctrl->ctrl);
2443
2444         spin_lock_irqsave(&ctrl->lock, flags);
2445         ctrl->flags |= FCCTRL_TERMIO;
2446         ctrl->iocnt = 0;
2447         spin_unlock_irqrestore(&ctrl->lock, flags);
2448
2449         /*
2450          * If io queues are present, stop them and terminate all outstanding
2451          * ios on them. As FC allocates FC exchange for each io, the
2452          * transport must contact the LLDD to terminate the exchange,
2453          * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2454          * to tell us what io's are busy and invoke a transport routine
2455          * to kill them with the LLDD.  After terminating the exchange
2456          * the LLDD will call the transport's normal io done path, but it
2457          * will have an aborted status. The done path will return the
2458          * io requests back to the block layer as part of normal completions
2459          * (but with error status).
2460          */
2461         if (ctrl->queue_count > 1) {
2462                 nvme_stop_queues(&ctrl->ctrl);
2463                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2464                                 nvme_fc_terminate_exchange, &ctrl->ctrl);
2465         }
2466
2467         /*
2468          * Other transports, which don't have link-level contexts bound
2469          * to sqe's, would try to gracefully shut down the controller by
2470          * writing the registers for shutdown and polling (call
2471          * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2472          * just aborted and we will wait on those contexts, and given
2473          * there was no indication of how live the controller is on the
2474          * link, don't send more io to create more contexts for the
2475          * shutdown. Let the controller fail via keepalive failure if
2476          * it's still present.
2477          */
2478
2479         /*
2480          * clean up the admin queue. Same thing as above.
2481          * use blk_mq_tagset_busy_iter() and the transport routine to
2482          * terminate the exchanges.
2483          */
2484         blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2485         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2486                                 nvme_fc_terminate_exchange, &ctrl->ctrl);
2487
2488         /* kill the aens as they are a separate path */
2489         nvme_fc_abort_aen_ops(ctrl);
2490
2491         /* wait for all io that had to be aborted */
2492         spin_lock_irqsave(&ctrl->lock, flags);
2493         while (ctrl->iocnt) {
2494                 spin_unlock_irqrestore(&ctrl->lock, flags);
2495                 msleep(1000);
2496                 spin_lock_irqsave(&ctrl->lock, flags);
2497         }
2498         ctrl->flags &= ~FCCTRL_TERMIO;
2499         spin_unlock_irqrestore(&ctrl->lock, flags);
2500
2501         nvme_fc_term_aen_ops(ctrl);
2502
2503         /*
2504          * send a Disconnect(association) LS to fc-nvme target
2505          * Note: could have been sent at top of process, but
2506          * cleaner on link traffic if after the aborts complete.
2507          * Note: if association doesn't exist, association_id will be 0
2508          */
2509         if (ctrl->association_id)
2510                 nvme_fc_xmt_disconnect_assoc(ctrl);
2511
2512         if (ctrl->ctrl.tagset) {
2513                 nvme_fc_delete_hw_io_queues(ctrl);
2514                 nvme_fc_free_io_queues(ctrl);
2515         }
2516
2517         __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2518         nvme_fc_free_queue(&ctrl->queues[0]);
2519 }
2520
2521 static void
2522 nvme_fc_delete_ctrl_work(struct work_struct *work)
2523 {
2524         struct nvme_fc_ctrl *ctrl =
2525                 container_of(work, struct nvme_fc_ctrl, delete_work);
2526
2527         cancel_work_sync(&ctrl->reset_work);
2528         cancel_delayed_work_sync(&ctrl->connect_work);
2529
2530         /*
2531          * kill the association on the link side.  this will block
2532          * waiting for io to terminate
2533          */
2534         nvme_fc_delete_association(ctrl);
2535
2536         /*
2537          * tear down the controller
2538          * This will result in the last reference on the nvme ctrl to
2539          * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
2540          * From there, the transport will tear down its logical queues and
2541          * association.
2542          */
2543         nvme_uninit_ctrl(&ctrl->ctrl);
2544
2545         nvme_put_ctrl(&ctrl->ctrl);
2546 }
2547
2548 static int
2549 __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2550 {
2551         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2552                 return -EBUSY;
2553
2554         if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2555                 return -EBUSY;
2556
2557         return 0;
2558 }
2559
2560 /*
2561  * Request from nvme core layer to delete the controller
2562  */
2563 static int
2564 nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2565 {
2566         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2567         int ret;
2568
2569         if (!kref_get_unless_zero(&ctrl->ctrl.kref))
2570                 return -EBUSY;
2571
2572         ret = __nvme_fc_del_ctrl(ctrl);
2573
2574         if (!ret)
2575                 flush_workqueue(nvme_fc_wq);
2576
2577         nvme_put_ctrl(&ctrl->ctrl);
2578
2579         return ret;
2580 }
2581
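     /*
      * Reset handler: tear down the existing association, then try to
      * build a new one. On failure, either schedule a delayed reconnect
      * or, once NVME_FC_MAX_CONNECT_ATTEMPTS is reached, delete the
      * controller.
      */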
2582 static void
2583 nvme_fc_reset_ctrl_work(struct work_struct *work)
2584 {
2585         struct nvme_fc_ctrl *ctrl =
2586                         container_of(work, struct nvme_fc_ctrl, reset_work);
2587         int ret;
2588
2589         /* will block while waiting for io to terminate */
2590         nvme_fc_delete_association(ctrl);
2591
2592         ret = nvme_fc_create_association(ctrl);
2593         if (ret) {
2594                 dev_warn(ctrl->ctrl.device,
2595                         "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2596                         ctrl->cnum, ret);
2597                 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2598                         dev_warn(ctrl->ctrl.device,
2599                                 "NVME-FC{%d}: Max reconnect attempts (%d) "
2600                                 "reached. Removing controller\n",
2601                                 ctrl->cnum, ctrl->connect_attempts);
2602
2603                         if (!nvme_change_ctrl_state(&ctrl->ctrl,
2604                                 NVME_CTRL_DELETING)) {
2605                                 dev_err(ctrl->ctrl.device,
2606                                         "NVME-FC{%d}: failed to change state "
2607                                         "to DELETING\n", ctrl->cnum);
2608                                 return;
2609                         }
2610
2611                         WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2612                         return;
2613                 }
2614
2615                 dev_warn(ctrl->ctrl.device,
2616                         "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2617                         ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
2618                 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2619                                 ctrl->ctrl.opts->reconnect_delay * HZ);
2620         } else
2621                 dev_info(ctrl->ctrl.device,
2622                         "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
2623 }
2624
2625 /*
2626  * called by the nvme core layer, for sysfs interface that requests
2627  * a reset of the nvme controller
2628  */
2629 static int
2630 nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2631 {
2632         struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2633
2634         dev_warn(ctrl->ctrl.device,
2635                 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
2636
2637         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
2638                 return -EBUSY;
2639
2640         if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
2641                 return -EBUSY;
2642
2643         flush_work(&ctrl->reset_work);
2644
2645         return 0;
2646 }
2647
2648 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2649         .name                   = "fc",
2650         .module                 = THIS_MODULE,
2651         .is_fabrics             = true,
2652         .reg_read32             = nvmf_reg_read32,
2653         .reg_read64             = nvmf_reg_read64,
2654         .reg_write32            = nvmf_reg_write32,
2655         .reset_ctrl             = nvme_fc_reset_nvme_ctrl,
2656         .free_ctrl              = nvme_fc_nvme_ctrl_freed,
2657         .submit_async_event     = nvme_fc_submit_async_event,
2658         .delete_ctrl            = nvme_fc_del_nvme_ctrl,
2659         .get_subsysnqn          = nvmf_get_subsysnqn,
2660         .get_address            = nvmf_get_address,
2661 };
2662
2663 static void
2664 nvme_fc_connect_ctrl_work(struct work_struct *work)
2665 {
2666         int ret;
2667
2668         struct nvme_fc_ctrl *ctrl =
2669                         container_of(to_delayed_work(work),
2670                                 struct nvme_fc_ctrl, connect_work);
2671
2672         ret = nvme_fc_create_association(ctrl);
2673         if (ret) {
2674                 dev_warn(ctrl->ctrl.device,
2675                         "NVME-FC{%d}: Reconnect attempt failed (%d)\n",
2676                         ctrl->cnum, ret);
2677                 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2678                         dev_warn(ctrl->ctrl.device,
2679                                 "NVME-FC{%d}: Max reconnect attempts (%d) "
2680                                 "reached. Removing controller\n",
2681                                 ctrl->cnum, ctrl->connect_attempts);
2682
2683                         if (!nvme_change_ctrl_state(&ctrl->ctrl,
2684                                 NVME_CTRL_DELETING)) {
2685                                 dev_err(ctrl->ctrl.device,
2686                                         "NVME-FC{%d}: failed to change state "
2687                                         "to DELETING\n", ctrl->cnum);
2688                                 return;
2689                         }
2690
2691                         WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2692                         return;
2693                 }
2694
2695                 dev_warn(ctrl->ctrl.device,
2696                         "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2697                         ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
2698                 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2699                                 ctrl->ctrl.opts->reconnect_delay * HZ);
2700         } else
2701                 dev_info(ctrl->ctrl.device,
2702                         "NVME-FC{%d}: controller reconnect complete\n",
2703                         ctrl->cnum);
2704 }
2705
2706
2707 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2708         .queue_rq       = nvme_fc_queue_rq,
2709         .complete       = nvme_fc_complete_rq,
2710         .init_request   = nvme_fc_init_admin_request,
2711         .exit_request   = nvme_fc_exit_request,
2712         .reinit_request = nvme_fc_reinit_request,
2713         .init_hctx      = nvme_fc_init_admin_hctx,
2714         .timeout        = nvme_fc_timeout,
2715 };
2716
2717
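     /*
      * Allocate and initialize a controller bound to the given lport/
      * rport pair: set up the work items, the admin tag set and admin
      * queue, register with the nvme core, add the controller to the
      * rport's list, and create the initial association.
      */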
2718 static struct nvme_ctrl *
2719 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2720         struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2721 {
2722         struct nvme_fc_ctrl *ctrl;
2723         unsigned long flags;
2724         int ret, idx;
2725
2726         if (!(rport->remoteport.port_role &
2727             (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2728                 ret = -EBADR;
2729                 goto out_fail;
2730         }
2731
2732         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2733         if (!ctrl) {
2734                 ret = -ENOMEM;
2735                 goto out_fail;
2736         }
2737
2738         idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2739         if (idx < 0) {
2740                 ret = -ENOSPC;
2741                 goto out_free_ctrl;
2742         }
2743
2744         ctrl->ctrl.opts = opts;
2745         INIT_LIST_HEAD(&ctrl->ctrl_list);
2746         ctrl->lport = lport;
2747         ctrl->rport = rport;
2748         ctrl->dev = lport->dev;
2749         ctrl->cnum = idx;
2750
2751         get_device(ctrl->dev);
2752         kref_init(&ctrl->ref);
2753
2754         INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
2755         INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
2756         INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
2757         spin_lock_init(&ctrl->lock);
2758
2759         /* io queue count */
2760         ctrl->queue_count = min_t(unsigned int,
2761                                 opts->nr_io_queues,
2762                                 lport->ops->max_hw_queues);
2763         opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
2764         ctrl->queue_count++;    /* +1 for admin queue */
2765
2766         ctrl->ctrl.sqsize = opts->queue_size - 1;
2767         ctrl->ctrl.kato = opts->kato;
2768
2769         ret = -ENOMEM;
2770         ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2771                                 GFP_KERNEL);
2772         if (!ctrl->queues)
2773                 goto out_free_ida;
2774
2775         memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
2776         ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
2777         ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
2778         ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
2779         ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
2780         ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2781                                         (SG_CHUNK_SIZE *
2782                                                 sizeof(struct scatterlist)) +
2783                                         ctrl->lport->ops->fcprqst_priv_sz;
2784         ctrl->admin_tag_set.driver_data = ctrl;
2785         ctrl->admin_tag_set.nr_hw_queues = 1;
2786         ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
2787
2788         ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
2789         if (ret)
2790                 goto out_free_queues;
2791
2792         ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2793         if (IS_ERR(ctrl->ctrl.admin_q)) {
2794                 ret = PTR_ERR(ctrl->ctrl.admin_q);
2795                 goto out_free_admin_tag_set;
2796         }
2797
2798         /*
2799          * Would have been nice to init io queues tag set as well.
2800          * However, we require interaction from the controller
2801          * for max io queue count before we can do so.
2802          * Defer this to the connect path.
2803          */
2804
2805         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
2806         if (ret)
2807                 goto out_cleanup_admin_q;
2808
2809         /* at this point, teardown path changes to ref counting on nvme ctrl */
2810
2811         spin_lock_irqsave(&rport->lock, flags);
2812         list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2813         spin_unlock_irqrestore(&rport->lock, flags);
2814
2815         ret = nvme_fc_create_association(ctrl);
2816         if (ret) {
2817                 ctrl->ctrl.opts = NULL;
2818                 /* initiate nvme ctrl ref counting teardown */
2819                 nvme_uninit_ctrl(&ctrl->ctrl);
2820                 nvme_put_ctrl(&ctrl->ctrl);
2821
2822                 /* as we're past the point where we transition to the ref
2823                  * counting teardown path, if we return a bad pointer here,
2824                  * the calling routine, thinking it's prior to the
2825                  * transition, will do an rport put. Since the teardown
2826                  * path also does a rport put, we do an extra get here so
2827                  * that proper order/teardown happens.
2828                  */
2829                 nvme_fc_rport_get(rport);
2830
2831                 if (ret > 0)
2832                         ret = -EIO;
2833                 return ERR_PTR(ret);
2834         }
2835
2836         dev_info(ctrl->ctrl.device,
2837                 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2838                 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2839
2840         return &ctrl->ctrl;
2841
2842 out_cleanup_admin_q:
2843         blk_cleanup_queue(ctrl->ctrl.admin_q);
2844 out_free_admin_tag_set:
2845         blk_mq_free_tag_set(&ctrl->admin_tag_set);
2846 out_free_queues:
2847         kfree(ctrl->queues);
2848 out_free_ida:
2849         put_device(ctrl->dev);
2850         ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2851 out_free_ctrl:
2852         kfree(ctrl);
2853 out_fail:
2854         /* exit via here doesn't follow the ctrl ref counting path */
2855         return ERR_PTR(ret);
2856 }
2857
2858 enum {
2859         FCT_TRADDR_ERR          = 0,
2860         FCT_TRADDR_WWNN         = 1 << 0,
2861         FCT_TRADDR_WWPN         = 1 << 1,
2862 };
2863
2864 struct nvmet_fc_traddr {
2865         u64     nn;
2866         u64     pn;
2867 };
2868
2869 static const match_table_t traddr_opt_tokens = {
2870         { FCT_TRADDR_WWNN,      "nn-%s"         },
2871         { FCT_TRADDR_WWPN,      "pn-%s"         },
2872         { FCT_TRADDR_ERR,       NULL            }
2873 };
2874
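     /*
      * Parse a transport address of the form "nn-<wwnn>:pn-<wwpn>",
      * e.g. "nn-0x20000090fa942e95:pn-0x10000090fa942e95" (the values
      * here are only an example), into a nvmet_fc_traddr.
      */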
2875 static int
2876 nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2877 {
2878         substring_t args[MAX_OPT_ARGS];
2879         char *options, *o, *p;
2880         int token, ret = 0;
2881         u64 token64;
2882
2883         options = o = kstrdup(buf, GFP_KERNEL);
2884         if (!options)
2885                 return -ENOMEM;
2886
2887         while ((p = strsep(&o, ":\n")) != NULL) {
2888                 if (!*p)
2889                         continue;
2890
2891                 token = match_token(p, traddr_opt_tokens, args);
2892                 switch (token) {
2893                 case FCT_TRADDR_WWNN:
2894                         if (match_u64(args, &token64)) {
2895                                 ret = -EINVAL;
2896                                 goto out;
2897                         }
2898                         traddr->nn = token64;
2899                         break;
2900                 case FCT_TRADDR_WWPN:
2901                         if (match_u64(args, &token64)) {
2902                                 ret = -EINVAL;
2903                                 goto out;
2904                         }
2905                         traddr->pn = token64;
2906                         break;
2907                 default:
2908                         pr_warn("unknown traddr token or missing value '%s'\n",
2909                                         p);
2910                         ret = -EINVAL;
2911                         goto out;
2912                 }
2913         }
2914
2915 out:
2916         kfree(options);
2917         return ret;
2918 }
2919
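     /*
      * Fabrics create_ctrl entry point: parse the remote (traddr) and
      * local (host_traddr) port names, find the matching registered
      * lport/rport pair, take a reference on the rport and create the
      * controller on it.
      */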
2920 static struct nvme_ctrl *
2921 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2922 {
2923         struct nvme_fc_lport *lport;
2924         struct nvme_fc_rport *rport;
2925         struct nvme_ctrl *ctrl;
2926         struct nvmet_fc_traddr laddr = { 0L, 0L };
2927         struct nvmet_fc_traddr raddr = { 0L, 0L };
2928         unsigned long flags;
2929         int ret;
2930
2931         ret = nvme_fc_parse_address(&raddr, opts->traddr);
2932         if (ret || !raddr.nn || !raddr.pn)
2933                 return ERR_PTR(-EINVAL);
2934
2935         ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2936         if (ret || !laddr.nn || !laddr.pn)
2937                 return ERR_PTR(-EINVAL);
2938
2939         /* find the host and remote ports to connect together */
2940         spin_lock_irqsave(&nvme_fc_lock, flags);
2941         list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2942                 if (lport->localport.node_name != laddr.nn ||
2943                     lport->localport.port_name != laddr.pn)
2944                         continue;
2945
2946                 list_for_each_entry(rport, &lport->endp_list, endp_list) {
2947                         if (rport->remoteport.node_name != raddr.nn ||
2948                             rport->remoteport.port_name != raddr.pn)
2949                                 continue;
2950
2951                         /* if we fail to get a reference, fall through; will error below */
2952                         if (!nvme_fc_rport_get(rport))
2953                                 break;
2954
2955                         spin_unlock_irqrestore(&nvme_fc_lock, flags);
2956
2957                         ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
2958                         if (IS_ERR(ctrl))
2959                                 nvme_fc_rport_put(rport);
2960                         return ctrl;
2961                 }
2962         }
2963         spin_unlock_irqrestore(&nvme_fc_lock, flags);
2964
2965         return ERR_PTR(-ENOENT);
2966 }
2967
2968
2969 static struct nvmf_transport_ops nvme_fc_transport = {
2970         .name           = "fc",
2971         .required_opts  = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2972         .allowed_opts   = NVMF_OPT_RECONNECT_DELAY,
2973         .create_ctrl    = nvme_fc_create_ctrl,
2974 };
2975
2976 static int __init nvme_fc_init_module(void)
2977 {
2978         int ret;
2979
2980         nvme_fc_wq = create_workqueue("nvme_fc_wq");
2981         if (!nvme_fc_wq)
2982                 return -ENOMEM;
2983
2984         ret = nvmf_register_transport(&nvme_fc_transport);
2985         if (ret)
2986                 goto err;
2987
2988         return 0;
2989 err:
2990         destroy_workqueue(nvme_fc_wq);
2991         return ret;
2992 }
2993
2994 static void __exit nvme_fc_exit_module(void)
2995 {
2996         /* sanity check - all lports should be removed */
2997         if (!list_empty(&nvme_fc_lport_list))
2998                 pr_warn("%s: localport list not empty\n", __func__);
2999
3000         nvmf_unregister_transport(&nvme_fc_transport);
3001
3002         destroy_workqueue(nvme_fc_wq);
3003
3004         ida_destroy(&nvme_fc_local_port_cnt);
3005         ida_destroy(&nvme_fc_ctrl_cnt);
3006 }
3007
3008 module_init(nvme_fc_init_module);
3009 module_exit(nvme_fc_exit_module);
3010
3011 MODULE_LICENSE("GPL v2");