2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/parser.h>
20 #include <uapi/scsi/fc/fc_fs.h>
21 #include <uapi/scsi/fc/fc_els.h>
22 #include <linux/delay.h>
26 #include <linux/nvme-fc-driver.h>
27 #include <linux/nvme-fc.h>
30 /* *************************** Data Structures/Defines ****************** */
34 * We handle AEN commands ourselves and don't even let the
35 * block layer know about them.
37 #define NVME_FC_NR_AEN_COMMANDS 1
38 #define NVME_FC_AQ_BLKMQ_DEPTH \
39 (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
40 #define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
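/*
 * Worked example (illustrative, assuming the nvme fabrics default of
 * NVMF_AQ_DEPTH == 32): the admin blk-mq queue is sized to 32 - 1 = 31
 * tags, and the lone AEN command uses command id 31 + 1 = 32, so it can
 * never collide with a tag allocated by the block layer.
 */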
42 enum nvme_fc_queue_flags {
43 NVME_FC_Q_CONNECTED = (1 << 0),
46 #define NVMEFC_QUEUE_DELAY 3 /* ms units */
48 #define NVME_FC_MAX_CONNECT_ATTEMPTS 1
50 struct nvme_fc_queue {
51 struct nvme_fc_ctrl *ctrl;
53 struct blk_mq_hw_ctx *hctx;
56 size_t cmnd_capsule_len;
65 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
67 enum nvme_fcop_flags {
68 FCOP_FLAGS_TERMIO = (1 << 0),
69 FCOP_FLAGS_RELEASED = (1 << 1),
70 FCOP_FLAGS_COMPLETE = (1 << 2),
71 FCOP_FLAGS_AEN = (1 << 3),
74 struct nvmefc_ls_req_op {
75 struct nvmefc_ls_req ls_req;
77 struct nvme_fc_rport *rport;
78 struct nvme_fc_queue *queue;
83 struct completion ls_done;
84 struct list_head lsreq_list; /* rport->ls_req_list */
88 enum nvme_fcpop_state {
89 FCPOP_STATE_UNINIT = 0,
91 FCPOP_STATE_ACTIVE = 2,
92 FCPOP_STATE_ABORTED = 3,
93 FCPOP_STATE_COMPLETE = 4,
96 struct nvme_fc_fcp_op {
97 struct nvme_request nreq; /*
100 * the 1st element in the
102 * associated with the
105 struct nvmefc_fcp_req fcp_req;
107 struct nvme_fc_ctrl *ctrl;
108 struct nvme_fc_queue *queue;
116 struct nvme_fc_cmd_iu cmd_iu;
117 struct nvme_fc_ersp_iu rsp_iu;
120 struct nvme_fc_lport {
121 struct nvme_fc_local_port localport;
124 struct list_head port_list; /* nvme_fc_port_list */
125 struct list_head endp_list;
126 struct device *dev; /* physical device for dma */
127 struct nvme_fc_port_template *ops;
129 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
131 struct nvme_fc_rport {
132 struct nvme_fc_remote_port remoteport;
134 struct list_head endp_list; /* for lport->endp_list */
135 struct list_head ctrl_list;
136 struct list_head ls_req_list;
137 struct device *dev; /* physical device for dma */
138 struct nvme_fc_lport *lport;
141 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
143 enum nvme_fcctrl_flags {
144 FCCTRL_TERMIO = (1 << 0),
147 struct nvme_fc_ctrl {
149 struct nvme_fc_queue *queues;
151 struct nvme_fc_lport *lport;
152 struct nvme_fc_rport *rport;
160 struct list_head ctrl_list; /* rport->ctrl_list */
162 struct blk_mq_tag_set admin_tag_set;
163 struct blk_mq_tag_set tag_set;
165 struct work_struct delete_work;
166 struct work_struct reset_work;
167 struct delayed_work connect_work;
168 int connect_attempts;
174 struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
176 struct nvme_ctrl ctrl;
179 static inline struct nvme_fc_ctrl *
180 to_fc_ctrl(struct nvme_ctrl *ctrl)
182 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
185 static inline struct nvme_fc_lport *
186 localport_to_lport(struct nvme_fc_local_port *portptr)
188 return container_of(portptr, struct nvme_fc_lport, localport);
191 static inline struct nvme_fc_rport *
192 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
194 return container_of(portptr, struct nvme_fc_rport, remoteport);
197 static inline struct nvmefc_ls_req_op *
198 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
200 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
203 static inline struct nvme_fc_fcp_op *
204 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
206 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
211 /* *************************** Globals **************************** */
214 static DEFINE_SPINLOCK(nvme_fc_lock);
216 static LIST_HEAD(nvme_fc_lport_list);
217 static DEFINE_IDA(nvme_fc_local_port_cnt);
218 static DEFINE_IDA(nvme_fc_ctrl_cnt);
220 static struct workqueue_struct *nvme_fc_wq;
224 /* *********************** FC-NVME Port Management ************************ */
226 static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
227 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
228 struct nvme_fc_queue *, unsigned int);
232 * nvme_fc_register_localport - transport entry point called by an
233 * LLDD to register the existence of a NVME
235 * @pinfo: pointer to information about the port to be registered
236 * @template: LLDD entrypoints and operational parameters for the port
237 * @dev: physical hardware device node port corresponds to. Will be
238 * used for DMA mappings
239 * @lport_p: pointer to a local port pointer. Upon success, the routine
240 * will allocate a nvme_fc_local_port structure and place its
241 * address in the local port pointer. Upon failure, local port
242 * pointer will be set to 0.
245 * a completion status. Must be 0 upon success; a negative errno
246 * (ex: -ENXIO) upon failure.
249 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
250 struct nvme_fc_port_template *template,
252 struct nvme_fc_local_port **portptr)
254 struct nvme_fc_lport *newrec;
258 if (!template->localport_delete || !template->remoteport_delete ||
259 !template->ls_req || !template->fcp_io ||
260 !template->ls_abort || !template->fcp_abort ||
261 !template->max_hw_queues || !template->max_sgl_segments ||
262 !template->max_dif_sgl_segments || !template->dma_boundary) {
264 goto out_reghost_failed;
267 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
271 goto out_reghost_failed;
274 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
280 if (!get_device(dev) && dev) {
285 INIT_LIST_HEAD(&newrec->port_list);
286 INIT_LIST_HEAD(&newrec->endp_list);
287 kref_init(&newrec->ref);
288 newrec->ops = template;
290 ida_init(&newrec->endp_cnt);
291 newrec->localport.private = &newrec[1];
292 newrec->localport.node_name = pinfo->node_name;
293 newrec->localport.port_name = pinfo->port_name;
294 newrec->localport.port_role = pinfo->port_role;
295 newrec->localport.port_id = pinfo->port_id;
296 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
297 newrec->localport.port_num = idx;
299 spin_lock_irqsave(&nvme_fc_lock, flags);
300 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
301 spin_unlock_irqrestore(&nvme_fc_lock, flags);
304 dma_set_seg_boundary(dev, template->dma_boundary);
306 *portptr = &newrec->localport;
310 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
318 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
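/*
 * Illustrative sketch of how an LLDD might register a local port; the
 * lldd_* names and values are hypothetical, but the template entries
 * shown are exactly the ones validated above:
 *
 *	static struct nvme_fc_port_template lldd_fc_nvme_template = {
 *		.localport_delete	= lldd_localport_delete,
 *		.remoteport_delete	= lldd_remoteport_delete,
 *		.ls_req			= lldd_ls_req,
 *		.fcp_io			= lldd_fcp_io,
 *		.ls_abort		= lldd_ls_abort,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 64,
 *		.max_dif_sgl_segments	= 64,
 *		.dma_boundary		= 0xffffffff,
 *		.local_priv_sz		= sizeof(struct lldd_local_priv),
 *	};
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_fc_nvme_template,
 *					 &pdev->dev, &lldd->localport);
 */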
321 nvme_fc_free_lport(struct kref *ref)
323 struct nvme_fc_lport *lport =
324 container_of(ref, struct nvme_fc_lport, ref);
327 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
328 WARN_ON(!list_empty(&lport->endp_list));
330 /* remove from transport list */
331 spin_lock_irqsave(&nvme_fc_lock, flags);
332 list_del(&lport->port_list);
333 spin_unlock_irqrestore(&nvme_fc_lock, flags);
335 /* let the LLDD know we've finished tearing it down */
336 lport->ops->localport_delete(&lport->localport);
338 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
339 ida_destroy(&lport->endp_cnt);
341 put_device(lport->dev);
347 nvme_fc_lport_put(struct nvme_fc_lport *lport)
349 kref_put(&lport->ref, nvme_fc_free_lport);
353 nvme_fc_lport_get(struct nvme_fc_lport *lport)
355 return kref_get_unless_zero(&lport->ref);
359 * nvme_fc_unregister_localport - transport entry point called by an
360 * LLDD to deregister/remove a previously
361 * registered NVME host FC port.
362 * @localport: pointer to the (registered) local port that is to be
366 * a completion status. Must be 0 upon success; a negative errno
367 * (ex: -ENXIO) upon failure.
370 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
372 struct nvme_fc_lport *lport = localport_to_lport(portptr);
378 spin_lock_irqsave(&nvme_fc_lock, flags);
380 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
381 spin_unlock_irqrestore(&nvme_fc_lock, flags);
384 portptr->port_state = FC_OBJSTATE_DELETED;
386 spin_unlock_irqrestore(&nvme_fc_lock, flags);
388 nvme_fc_lport_put(lport);
392 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
395 * nvme_fc_register_remoteport - transport entry point called by an
396 * LLDD to register the existence of a NVME
397 * subsystem FC port on its fabric.
398 * @localport: pointer to the (registered) local port that the remote
399 * subsystem port is connected to.
400 * @pinfo: pointer to information about the port to be registered
401 * @rport_p: pointer to a remote port pointer. Upon success, the routine
402 * will allocate a nvme_fc_remote_port structure and place its
403 * address in the remote port pointer. Upon failure, remote port
404 * pointer will be set to 0.
407 * a completion status. Must be 0 upon success; a negative errno
408 * (ex: -ENXIO) upon failure.
411 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
412 struct nvme_fc_port_info *pinfo,
413 struct nvme_fc_remote_port **portptr)
415 struct nvme_fc_lport *lport = localport_to_lport(localport);
416 struct nvme_fc_rport *newrec;
420 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
424 goto out_reghost_failed;
427 if (!nvme_fc_lport_get(lport)) {
429 goto out_kfree_rport;
432 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
438 INIT_LIST_HEAD(&newrec->endp_list);
439 INIT_LIST_HEAD(&newrec->ctrl_list);
440 INIT_LIST_HEAD(&newrec->ls_req_list);
441 kref_init(&newrec->ref);
442 spin_lock_init(&newrec->lock);
443 newrec->remoteport.localport = &lport->localport;
444 newrec->dev = lport->dev;
445 newrec->lport = lport;
446 newrec->remoteport.private = &newrec[1];
447 newrec->remoteport.port_role = pinfo->port_role;
448 newrec->remoteport.node_name = pinfo->node_name;
449 newrec->remoteport.port_name = pinfo->port_name;
450 newrec->remoteport.port_id = pinfo->port_id;
451 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
452 newrec->remoteport.port_num = idx;
454 spin_lock_irqsave(&nvme_fc_lock, flags);
455 list_add_tail(&newrec->endp_list, &lport->endp_list);
456 spin_unlock_irqrestore(&nvme_fc_lock, flags);
458 *portptr = &newrec->remoteport;
462 nvme_fc_lport_put(lport);
469 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
472 nvme_fc_free_rport(struct kref *ref)
474 struct nvme_fc_rport *rport =
475 container_of(ref, struct nvme_fc_rport, ref);
476 struct nvme_fc_lport *lport =
477 localport_to_lport(rport->remoteport.localport);
480 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
481 WARN_ON(!list_empty(&rport->ctrl_list));
483 /* remove from lport list */
484 spin_lock_irqsave(&nvme_fc_lock, flags);
485 list_del(&rport->endp_list);
486 spin_unlock_irqrestore(&nvme_fc_lock, flags);
488 /* let the LLDD know we've finished tearing it down */
489 lport->ops->remoteport_delete(&rport->remoteport);
491 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
495 nvme_fc_lport_put(lport);
499 nvme_fc_rport_put(struct nvme_fc_rport *rport)
501 kref_put(&rport->ref, nvme_fc_free_rport);
505 nvme_fc_rport_get(struct nvme_fc_rport *rport)
507 return kref_get_unless_zero(&rport->ref);
511 nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
513 struct nvmefc_ls_req_op *lsop;
517 spin_lock_irqsave(&rport->lock, flags);
519 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
520 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
521 lsop->flags |= FCOP_FLAGS_TERMIO;
522 spin_unlock_irqrestore(&rport->lock, flags);
523 rport->lport->ops->ls_abort(&rport->lport->localport,
529 spin_unlock_irqrestore(&rport->lock, flags);
535 * nvme_fc_unregister_remoteport - transport entry point called by an
536 * LLDD to deregister/remove a previously
537 * registered NVME subsystem FC port.
538 * @remoteport: pointer to the (registered) remote port that is to be
542 * a completion status. Must be 0 upon success; a negative errno
543 * (ex: -ENXIO) upon failure.
546 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
548 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
549 struct nvme_fc_ctrl *ctrl;
555 spin_lock_irqsave(&rport->lock, flags);
557 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
558 spin_unlock_irqrestore(&rport->lock, flags);
561 portptr->port_state = FC_OBJSTATE_DELETED;
563 /* tear down all associations to the remote port */
564 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
565 __nvme_fc_del_ctrl(ctrl);
567 spin_unlock_irqrestore(&rport->lock, flags);
569 nvme_fc_abort_lsops(rport);
571 nvme_fc_rport_put(rport);
574 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
577 /* *********************** FC-NVME DMA Handling **************************** */
580 * The fcloop device passes in a NULL device pointer. Real LLDDs will
581 * pass in a valid device pointer. If NULL is passed to the dma mapping
582 * routines, depending on the platform, it may or may not succeed, and
586 * Wrap all the dma routines and check the dev pointer.
588 * For simple mappings (those that return just a dma address), we noop
589 * them, returning a dma address of 0.
591 * On more complex mappings (dma_map_sg), a pseudo routine fills
592 * in the scatter list, setting all dma addresses to 0.
595 static inline dma_addr_t
596 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
597 enum dma_data_direction dir)
599 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
603 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
605 return dev ? dma_mapping_error(dev, dma_addr) : 0;
609 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
610 enum dma_data_direction dir)
613 dma_unmap_single(dev, addr, size, dir);
617 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
618 enum dma_data_direction dir)
621 dma_sync_single_for_cpu(dev, addr, size, dir);
625 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
626 enum dma_data_direction dir)
629 dma_sync_single_for_device(dev, addr, size, dir);
632 /* pseudo dma_map_sg call */
634 fc_map_sg(struct scatterlist *sg, int nents)
636 struct scatterlist *s;
639 WARN_ON(nents == 0 || sg[0].length == 0);
641 for_each_sg(sg, s, nents, i) {
643 #ifdef CONFIG_NEED_SG_DMA_LENGTH
644 s->dma_length = s->length;
651 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
652 enum dma_data_direction dir)
654 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
658 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
659 enum dma_data_direction dir)
662 dma_unmap_sg(dev, sg, nents, dir);
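/*
 * Example of the wrapper behavior (illustrative): with the fcloop test
 * driver the device pointer is NULL, so
 *
 *	dma = fc_dma_map_single(rport->dev, buf, len, DMA_TO_DEVICE);
 *
 * is a noop returning 0, and fc_dma_mapping_error() reports success.
 * With a real LLDD device the same call goes through the regular DMA API.
 */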
666 /* *********************** FC-NVME LS Handling **************************** */
668 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
669 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
673 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
675 struct nvme_fc_rport *rport = lsop->rport;
676 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
679 spin_lock_irqsave(&rport->lock, flags);
681 if (!lsop->req_queued) {
682 spin_unlock_irqrestore(&rport->lock, flags);
686 list_del(&lsop->lsreq_list);
688 lsop->req_queued = false;
690 spin_unlock_irqrestore(&rport->lock, flags);
692 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
693 (lsreq->rqstlen + lsreq->rsplen),
696 nvme_fc_rport_put(rport);
700 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
701 struct nvmefc_ls_req_op *lsop,
702 void (*done)(struct nvmefc_ls_req *req, int status))
704 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
708 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
709 return -ECONNREFUSED;
711 if (!nvme_fc_rport_get(rport))
716 lsop->req_queued = false;
717 INIT_LIST_HEAD(&lsop->lsreq_list);
718 init_completion(&lsop->ls_done);
720 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
721 lsreq->rqstlen + lsreq->rsplen,
723 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
727 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
729 spin_lock_irqsave(&rport->lock, flags);
731 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
733 lsop->req_queued = true;
735 spin_unlock_irqrestore(&rport->lock, flags);
737 ret = rport->lport->ops->ls_req(&rport->lport->localport,
738 &rport->remoteport, lsreq);
745 lsop->ls_error = ret;
746 spin_lock_irqsave(&rport->lock, flags);
747 lsop->req_queued = false;
748 list_del(&lsop->lsreq_list);
749 spin_unlock_irqrestore(&rport->lock, flags);
750 fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
751 (lsreq->rqstlen + lsreq->rsplen),
754 nvme_fc_rport_put(rport);
760 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
762 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
764 lsop->ls_error = status;
765 complete(&lsop->ls_done);
769 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
771 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
772 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
775 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
779 * No timeout/not interruptible as we need the struct
780 * to exist until the lldd calls us back. Thus mandate
781 * wait until driver calls back. lldd responsible for
784 wait_for_completion(&lsop->ls_done);
786 __nvme_fc_finish_ls_req(lsop);
788 ret = lsop->ls_error;
794 /* ACC or RJT payload ? */
795 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
802 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
803 struct nvmefc_ls_req_op *lsop,
804 void (*done)(struct nvmefc_ls_req *req, int status))
806 /* don't wait for completion */
808 return __nvme_fc_send_ls_req(rport, lsop, done);
811 /* Validation Error indexes into the string table below */
815 VERR_LSDESC_RQST = 2,
816 VERR_LSDESC_RQST_LEN = 3,
818 VERR_ASSOC_ID_LEN = 5,
820 VERR_CONN_ID_LEN = 7,
822 VERR_CR_ASSOC_ACC_LEN = 9,
824 VERR_CR_CONN_ACC_LEN = 11,
826 VERR_DISCONN_ACC_LEN = 13,
829 static char *validation_errors[] = {
833 "Bad LSDESC_RQST Length",
834 "Not Association ID",
835 "Bad Association ID Length",
837 "Bad Connection ID Length",
839 "Bad CR_ASSOC ACC Length",
841 "Bad CR_CONN ACC Length",
842 "Not Disconnect Rqst",
843 "Bad Disconnect ACC Length",
847 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
848 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
850 struct nvmefc_ls_req_op *lsop;
851 struct nvmefc_ls_req *lsreq;
852 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
853 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
856 lsop = kzalloc((sizeof(*lsop) +
857 ctrl->lport->ops->lsrqst_priv_sz +
858 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
863 lsreq = &lsop->ls_req;
865 lsreq->private = (void *)&lsop[1];
866 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
867 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
868 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
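/*
 * The single allocation above packs everything this LS needs into one
 * buffer: the lsop itself, the LLDD's private LS area, the CR_ASSOC
 * request payload, then the CR_ASSOC accept payload. The pointer
 * arithmetic here simply carves that buffer up in the same order.
 */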
870 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
871 assoc_rqst->desc_list_len =
872 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
874 assoc_rqst->assoc_cmd.desc_tag =
875 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
876 assoc_rqst->assoc_cmd.desc_len =
878 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
880 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
881 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
882 /* Linux supports only Dynamic controllers */
883 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
884 memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
885 min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
886 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
887 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
888 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
889 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
892 lsreq->rqstaddr = assoc_rqst;
893 lsreq->rqstlen = sizeof(*assoc_rqst);
894 lsreq->rspaddr = assoc_acc;
895 lsreq->rsplen = sizeof(*assoc_acc);
896 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
898 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
900 goto out_free_buffer;
902 /* process connect LS completion */
904 /* validate the ACC response */
905 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
907 else if (assoc_acc->hdr.desc_list_len !=
909 sizeof(struct fcnvme_ls_cr_assoc_acc)))
910 fcret = VERR_CR_ASSOC_ACC_LEN;
911 else if (assoc_acc->hdr.rqst.desc_tag !=
912 cpu_to_be32(FCNVME_LSDESC_RQST))
913 fcret = VERR_LSDESC_RQST;
914 else if (assoc_acc->hdr.rqst.desc_len !=
915 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
916 fcret = VERR_LSDESC_RQST_LEN;
917 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
918 fcret = VERR_CR_ASSOC;
919 else if (assoc_acc->associd.desc_tag !=
920 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
921 fcret = VERR_ASSOC_ID;
922 else if (assoc_acc->associd.desc_len !=
924 sizeof(struct fcnvme_lsdesc_assoc_id)))
925 fcret = VERR_ASSOC_ID_LEN;
926 else if (assoc_acc->connectid.desc_tag !=
927 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
928 fcret = VERR_CONN_ID;
929 else if (assoc_acc->connectid.desc_len !=
930 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
931 fcret = VERR_CONN_ID_LEN;
936 "q %d connect failed: %s\n",
937 queue->qnum, validation_errors[fcret]);
939 ctrl->association_id =
940 be64_to_cpu(assoc_acc->associd.association_id);
941 queue->connection_id =
942 be64_to_cpu(assoc_acc->connectid.connection_id);
943 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
951 "queue %d connect admin queue failed (%d).\n",
957 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
958 u16 qsize, u16 ersp_ratio)
960 struct nvmefc_ls_req_op *lsop;
961 struct nvmefc_ls_req *lsreq;
962 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
963 struct fcnvme_ls_cr_conn_acc *conn_acc;
966 lsop = kzalloc((sizeof(*lsop) +
967 ctrl->lport->ops->lsrqst_priv_sz +
968 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
973 lsreq = &lsop->ls_req;
975 lsreq->private = (void *)&lsop[1];
976 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
977 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
978 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
980 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
981 conn_rqst->desc_list_len = cpu_to_be32(
982 sizeof(struct fcnvme_lsdesc_assoc_id) +
983 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
985 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
986 conn_rqst->associd.desc_len =
988 sizeof(struct fcnvme_lsdesc_assoc_id));
989 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
990 conn_rqst->connect_cmd.desc_tag =
991 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
992 conn_rqst->connect_cmd.desc_len =
994 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
995 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
996 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
997 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
1000 lsreq->rqstaddr = conn_rqst;
1001 lsreq->rqstlen = sizeof(*conn_rqst);
1002 lsreq->rspaddr = conn_acc;
1003 lsreq->rsplen = sizeof(*conn_acc);
1004 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1006 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1008 goto out_free_buffer;
1010 /* process connect LS completion */
1012 /* validate the ACC response */
1013 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
1015 else if (conn_acc->hdr.desc_list_len !=
1016 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
1017 fcret = VERR_CR_CONN_ACC_LEN;
1018 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
1019 fcret = VERR_LSDESC_RQST;
1020 else if (conn_acc->hdr.rqst.desc_len !=
1021 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
1022 fcret = VERR_LSDESC_RQST_LEN;
1023 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
1024 fcret = VERR_CR_CONN;
1025 else if (conn_acc->connectid.desc_tag !=
1026 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
1027 fcret = VERR_CONN_ID;
1028 else if (conn_acc->connectid.desc_len !=
1029 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
1030 fcret = VERR_CONN_ID_LEN;
1035 "q %d connect failed: %s\n",
1036 queue->qnum, validation_errors[fcret]);
1038 queue->connection_id =
1039 be64_to_cpu(conn_acc->connectid.connection_id);
1040 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1048 "queue %d connect command failed (%d).\n",
1054 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
1056 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1058 __nvme_fc_finish_ls_req(lsop);
1060 /* fc-nvme initiator doesn't care about success or failure of cmd */
1066 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1067 * the FC-NVME Association. Terminating the association also
1068 * terminates the FC-NVME connections (per queue, both admin and io
1069 * queues) that are part of the association. E.g. things are torn
1070 * down, and the related FC-NVME Association ID and Connection IDs
1073 * The behavior of the fc-nvme initiator is such that its
1074 * understanding of the association and connections will implicitly
1075 * be torn down. The action is implicit as it may be due to a loss of
1076 * connectivity with the fc-nvme target, so you may never get a
1077 * response even if you tried. As such, the action of this routine
1078 * is to asynchronously send the LS, ignore any results of the LS, and
1079 * continue on with terminating the association. If the fc-nvme target
1080 * is present and receives the LS, it too can tear down.
1083 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1085 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1086 struct fcnvme_ls_disconnect_acc *discon_acc;
1087 struct nvmefc_ls_req_op *lsop;
1088 struct nvmefc_ls_req *lsreq;
1091 lsop = kzalloc((sizeof(*lsop) +
1092 ctrl->lport->ops->lsrqst_priv_sz +
1093 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1096 /* couldn't send it... too bad */
1099 lsreq = &lsop->ls_req;
1101 lsreq->private = (void *)&lsop[1];
1102 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1103 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1104 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1106 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1107 discon_rqst->desc_list_len = cpu_to_be32(
1108 sizeof(struct fcnvme_lsdesc_assoc_id) +
1109 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1111 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1112 discon_rqst->associd.desc_len =
1114 sizeof(struct fcnvme_lsdesc_assoc_id));
1116 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1118 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1119 FCNVME_LSDESC_DISCONN_CMD);
1120 discon_rqst->discon_cmd.desc_len =
1122 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1123 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1124 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1126 lsreq->rqstaddr = discon_rqst;
1127 lsreq->rqstlen = sizeof(*discon_rqst);
1128 lsreq->rspaddr = discon_acc;
1129 lsreq->rsplen = sizeof(*discon_acc);
1130 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1132 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1133 nvme_fc_disconnect_assoc_done);
1137 /* the only meaningful part of terminating the association */
1138 ctrl->association_id = 0;
1142 /* *********************** NVME Ctrl Routines **************************** */
1144 static void __nvme_fc_final_op_cleanup(struct request *rq);
1147 nvme_fc_reinit_request(void *data, struct request *rq)
1149 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1150 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1152 memset(cmdiu, 0, sizeof(*cmdiu));
1153 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1154 cmdiu->fc_id = NVME_CMD_FC_ID;
1155 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1156 memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1162 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1163 struct nvme_fc_fcp_op *op)
1165 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1166 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1167 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1168 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1170 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1174 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1175 unsigned int hctx_idx)
1177 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1179 return __nvme_fc_exit_request(set->driver_data, op);
1183 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1187 state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1188 if (state != FCPOP_STATE_ACTIVE) {
1189 atomic_set(&op->state, state);
1193 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1194 &ctrl->rport->remoteport,
1195 op->queue->lldd_handle,
1202 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1204 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1205 unsigned long flags;
1208 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1209 if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
1212 spin_lock_irqsave(&ctrl->lock, flags);
1213 if (ctrl->flags & FCCTRL_TERMIO) {
1215 aen_op->flags |= FCOP_FLAGS_TERMIO;
1217 spin_unlock_irqrestore(&ctrl->lock, flags);
1219 ret = __nvme_fc_abort_op(ctrl, aen_op);
1222 * if __nvme_fc_abort_op failed the io wasn't
1223 * active. Thus this call path is running in
1224 * parallel to the io complete. Treat as non-error.
1227 /* back out the flags/counters */
1228 spin_lock_irqsave(&ctrl->lock, flags);
1229 if (ctrl->flags & FCCTRL_TERMIO)
1231 aen_op->flags &= ~FCOP_FLAGS_TERMIO;
1232 spin_unlock_irqrestore(&ctrl->lock, flags);
1239 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1240 struct nvme_fc_fcp_op *op)
1242 unsigned long flags;
1243 bool complete_rq = false;
1245 spin_lock_irqsave(&ctrl->lock, flags);
1246 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1247 if (ctrl->flags & FCCTRL_TERMIO)
1250 if (op->flags & FCOP_FLAGS_RELEASED)
1253 op->flags |= FCOP_FLAGS_COMPLETE;
1254 spin_unlock_irqrestore(&ctrl->lock, flags);
1260 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1262 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1263 struct request *rq = op->rq;
1264 struct nvmefc_fcp_req *freq = &op->fcp_req;
1265 struct nvme_fc_ctrl *ctrl = op->ctrl;
1266 struct nvme_fc_queue *queue = op->queue;
1267 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1268 struct nvme_command *sqe = &op->cmd_iu.sqe;
1269 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1270 union nvme_result result;
1275 * The current linux implementation of a nvme controller
1276 * allocates a single tag set for all io queues and sizes
1277 * the io queues to fully hold all possible tags. Thus, the
1278 * implementation does not reference or care about the sqhd
1279 * value as it never needs to use the sqhd/sqtail pointers
1280 * for submission pacing.
1282 * This affects the FC-NVME implementation in two ways:
1283 * 1) As the value doesn't matter, we don't need to waste
1284 * cycles extracting it from ERSPs and stamping it in the
1285 * cases where the transport fabricates CQEs on successful
1287 * 2) The FC-NVME implementation requires that delivery of
1288 * ERSP completions are to go back to the nvme layer in order
1289 * relative to the rsn, such that the sqhd value will always
1290 * be "in order" for the nvme layer. As the nvme layer in
1291 * linux doesn't care about sqhd, there's no need to return
1295 * As the core nvme layer in linux currently does not look at
1296 * every field in the cqe - in cases where the FC transport must
1297 * fabricate a CQE, the following fields will not be set as they
1298 * are not referenced:
1299 * cqe.sqid, cqe.sqhd, cqe.command_id
1302 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1303 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1305 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1306 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1307 else if (freq->status)
1308 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1311 * For the linux implementation, if we have an unsuccessful
1312 * status, the blk-mq layer can typically be called with the
1313 * non-zero status and the content of the cqe isn't important.
1319 * command completed successfully relative to the wire
1320 * protocol. However, validate anything received and
1321 * extract the status and result from the cqe (create it
1325 switch (freq->rcv_rsplen) {
1328 case NVME_FC_SIZEOF_ZEROS_RSP:
1330 * No response payload or 12 bytes of payload (which
1331 * should all be zeros) is considered successful; the
1332 * transport fabricates the CQE with no payload.
1334 if (freq->transferred_length !=
1335 be32_to_cpu(op->cmd_iu.data_len)) {
1336 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1342 case sizeof(struct nvme_fc_ersp_iu):
1344 * The ERSP IU contains a full completion with CQE.
1345 * Validate ERSP IU and look at cqe.
1347 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1348 (freq->rcv_rsplen / 4) ||
1349 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1350 freq->transferred_length ||
1351 op->rsp_iu.status_code ||
1352 sqe->common.command_id != cqe->command_id)) {
1353 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1356 result = cqe->result;
1357 status = cqe->status;
1361 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
1366 if (op->flags & FCOP_FLAGS_AEN) {
1367 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
1368 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1369 atomic_set(&op->state, FCPOP_STATE_IDLE);
1370 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
1371 nvme_fc_ctrl_put(ctrl);
1375 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
1377 if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
1378 status = cpu_to_le16(NVME_SC_ABORT_REQ);
1379 if (blk_queue_dying(rq->q))
1380 status |= cpu_to_le16(NVME_SC_DNR);
1382 nvme_end_request(rq, status, result);
1384 __nvme_fc_final_op_cleanup(rq);
1388 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1389 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1390 struct request *rq, u32 rqno)
1392 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1395 memset(op, 0, sizeof(*op));
1396 op->fcp_req.cmdaddr = &op->cmd_iu;
1397 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1398 op->fcp_req.rspaddr = &op->rsp_iu;
1399 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1400 op->fcp_req.done = nvme_fc_fcpio_done;
1401 op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1402 op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1408 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1409 cmdiu->fc_id = NVME_CMD_FC_ID;
1410 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1412 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1413 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1414 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1416 "FCP Op failed - cmdiu dma mapping failed.\n");
1421 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1422 &op->rsp_iu, sizeof(op->rsp_iu),
1424 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1426 "FCP Op failed - rspiu dma mapping failed.\n");
1430 atomic_set(&op->state, FCPOP_STATE_IDLE);
1436 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
1437 unsigned int hctx_idx, unsigned int numa_node)
1439 struct nvme_fc_ctrl *ctrl = set->driver_data;
1440 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1441 struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
1443 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1447 nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
1448 unsigned int hctx_idx, unsigned int numa_node)
1450 struct nvme_fc_ctrl *ctrl = set->driver_data;
1451 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1452 struct nvme_fc_queue *queue = &ctrl->queues[0];
1454 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1458 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1460 struct nvme_fc_fcp_op *aen_op;
1461 struct nvme_fc_cmd_iu *cmdiu;
1462 struct nvme_command *sqe;
1466 aen_op = ctrl->aen_ops;
1467 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1468 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
1473 cmdiu = &aen_op->cmd_iu;
1475 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1476 aen_op, (struct request *)NULL,
1477 (AEN_CMDID_BASE + i));
1483 aen_op->flags = FCOP_FLAGS_AEN;
1484 aen_op->fcp_req.first_sgl = NULL; /* no sg list */
1485 aen_op->fcp_req.private = private;
1487 memset(sqe, 0, sizeof(*sqe));
1488 sqe->common.opcode = nvme_admin_async_event;
1489 /* Note: core layer may overwrite the sqe.command_id value */
1490 sqe->common.command_id = AEN_CMDID_BASE + i;
1496 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
1498 struct nvme_fc_fcp_op *aen_op;
1501 aen_op = ctrl->aen_ops;
1502 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1503 if (!aen_op->fcp_req.private)
1506 __nvme_fc_exit_request(ctrl, aen_op);
1508 kfree(aen_op->fcp_req.private);
1509 aen_op->fcp_req.private = NULL;
1514 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1517 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1519 hctx->driver_data = queue;
1524 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1525 unsigned int hctx_idx)
1527 struct nvme_fc_ctrl *ctrl = data;
1529 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1535 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1536 unsigned int hctx_idx)
1538 struct nvme_fc_ctrl *ctrl = data;
1540 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1546 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
1548 struct nvme_fc_queue *queue;
1550 queue = &ctrl->queues[idx];
1551 memset(queue, 0, sizeof(*queue));
1554 atomic_set(&queue->csn, 1);
1555 queue->dev = ctrl->dev;
1558 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1560 queue->cmnd_capsule_len = sizeof(struct nvme_command);
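/*
 * Note: ioccsz is reported by the controller in 16-byte units, so the
 * minimum value of 4 corresponds to the bare 64-byte SQE; larger values
 * allow in-capsule data beyond the SQE.
 */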
1562 queue->queue_size = queue_size;
1565 * Considered whether we should allocate buffers for all SQEs
1566 * and CQEs and dma map them - mapping their respective entries
1567 * into the request structures (kernel vm addr and dma address)
1568 * thus the driver could use the buffers/mappings directly.
1569 * It only makes sense if the LLDD would use them for its
1570 * messaging api. It's very unlikely most adapter APIs would use
1571 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
1572 * structures were used instead.
1577 * This routine terminates a queue at the transport level.
1578 * The transport has already ensured that all outstanding ios on
1579 * the queue have been terminated.
1580 * The transport will send a Disconnect LS request to terminate
1581 * the queue's connection. Termination of the admin queue will also
1582 * terminate the association at the target.
1585 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1587 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1591 * Current implementation never disconnects a single queue.
1592 * It always terminates a whole association. So there is never
1593 * a disconnect(queue) LS sent to the target.
1596 queue->connection_id = 0;
1597 clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1601 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1602 struct nvme_fc_queue *queue, unsigned int qidx)
1604 if (ctrl->lport->ops->delete_queue)
1605 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1606 queue->lldd_handle);
1607 queue->lldd_handle = NULL;
1611 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1615 for (i = 1; i < ctrl->queue_count; i++)
1616 nvme_fc_free_queue(&ctrl->queues[i]);
1620 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1621 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1625 queue->lldd_handle = NULL;
1626 if (ctrl->lport->ops->create_queue)
1627 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1628 qidx, qsize, &queue->lldd_handle);
1634 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1636 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
1639 for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
1640 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1644 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1646 struct nvme_fc_queue *queue = &ctrl->queues[1];
1649 for (i = 1; i < ctrl->queue_count; i++, queue++) {
1650 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1659 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1664 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1668 for (i = 1; i < ctrl->queue_count; i++) {
1669 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1673 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1682 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1686 for (i = 1; i < ctrl->queue_count; i++)
1687 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1691 nvme_fc_ctrl_free(struct kref *ref)
1693 struct nvme_fc_ctrl *ctrl =
1694 container_of(ref, struct nvme_fc_ctrl, ref);
1695 unsigned long flags;
1697 if (ctrl->ctrl.tagset) {
1698 blk_cleanup_queue(ctrl->ctrl.connect_q);
1699 blk_mq_free_tag_set(&ctrl->tag_set);
1702 /* remove from rport list */
1703 spin_lock_irqsave(&ctrl->rport->lock, flags);
1704 list_del(&ctrl->ctrl_list);
1705 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1707 blk_cleanup_queue(ctrl->ctrl.admin_q);
1708 blk_mq_free_tag_set(&ctrl->admin_tag_set);
1710 kfree(ctrl->queues);
1712 put_device(ctrl->dev);
1713 nvme_fc_rport_put(ctrl->rport);
1715 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
1716 if (ctrl->ctrl.opts)
1717 nvmf_free_options(ctrl->ctrl.opts);
1722 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1724 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1728 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1730 return kref_get_unless_zero(&ctrl->ref);
1734 * All accesses from nvme core layer done - can now free the
1735 * controller. Called after last nvme_put_ctrl() call
1738 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
1740 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1742 WARN_ON(nctrl != &ctrl->ctrl);
1744 nvme_fc_ctrl_put(ctrl);
1748 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
1750 dev_warn(ctrl->ctrl.device,
1751 "NVME-FC{%d}: transport association error detected: %s\n",
1752 ctrl->cnum, errmsg);
1753 dev_info(ctrl->ctrl.device,
1754 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
1756 /* stop the queues on error, cleanup is in reset thread */
1757 if (ctrl->queue_count > 1)
1758 nvme_stop_queues(&ctrl->ctrl);
1760 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
1761 dev_err(ctrl->ctrl.device,
1762 "NVME-FC{%d}: error_recovery: Couldn't change state "
1763 "to RECONNECTING\n", ctrl->cnum);
1767 if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
1768 dev_err(ctrl->ctrl.device,
1769 "NVME-FC{%d}: error_recovery: Failed to schedule "
1770 "reset work\n", ctrl->cnum);
1773 static enum blk_eh_timer_return
1774 nvme_fc_timeout(struct request *rq, bool reserved)
1776 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1777 struct nvme_fc_ctrl *ctrl = op->ctrl;
1781 return BLK_EH_RESET_TIMER;
1783 ret = __nvme_fc_abort_op(ctrl, op);
1785 /* io wasn't active to abort consider it done */
1786 return BLK_EH_HANDLED;
1789 * we can't individually ABTS an io without affecting the queue,
1790 * thus killing the queue, and thus the association.
1791 * So resolve by performing a controller reset, which will stop
1792 * the host/io stack, terminate the association on the link,
1793 * and recreate an association on the link.
1795 nvme_fc_error_recovery(ctrl, "io timeout error");
1797 return BLK_EH_HANDLED;
1801 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1802 struct nvme_fc_fcp_op *op)
1804 struct nvmefc_fcp_req *freq = &op->fcp_req;
1805 enum dma_data_direction dir;
1810 if (!blk_rq_payload_bytes(rq))
1813 freq->sg_table.sgl = freq->first_sgl;
1814 ret = sg_alloc_table_chained(&freq->sg_table,
1815 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1819 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1820 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1821 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1822 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1824 if (unlikely(freq->sg_cnt <= 0)) {
1825 sg_free_table_chained(&freq->sg_table, true);
1831 * TODO: blk_integrity_rq(rq) for DIF
1837 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1838 struct nvme_fc_fcp_op *op)
1840 struct nvmefc_fcp_req *freq = &op->fcp_req;
1845 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1846 ((rq_data_dir(rq) == WRITE) ?
1847 DMA_TO_DEVICE : DMA_FROM_DEVICE));
1849 nvme_cleanup_cmd(rq);
1851 sg_free_table_chained(&freq->sg_table, true);
1857 * In FC, the queue is a logical thing. At transport connect, the target
1858 * creates its "queue" and returns a handle that is to be given to the
1859 * target whenever it posts something to the corresponding SQ. When an
1860 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
1861 * command contained within the SQE, an io, and assigns a FC exchange
1862 * to it. The SQE and the associated SQ handle are sent in the initial
1863 * CMD IU sent on the exchange. All transfers relative to the io occur
1864 * as part of the exchange. The CQE is the last thing for the io,
1865 * which is transferred (explicitly or implicitly) with the RSP IU
1866 * sent on the exchange. After the CQE is received, the FC exchange is
1867 * terminated and the Exchange may be used on a different io.
1869 * The transport to LLDD api has the transport making a request for a
1870 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
1871 * resource and transfers the command. The LLDD will then process all
1872 * steps to complete the io. Upon completion, the transport done routine
1875 * So - while the operation is outstanding to the LLDD, there is a link
1876 * level FC exchange resource that is also outstanding. This must be
1877 * considered in all cleanup operations.
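 *
 * Rough lifecycle sketch (illustrative): nvme_fc_queue_rq() formats the
 * SQE and calls nvme_fc_start_fcp_op(), which marks the op ACTIVE and
 * hands it to lport->ops->fcp_io(); when the LLDD finishes the exchange
 * it invokes op->fcp_req.done (nvme_fc_fcpio_done), which extracts or
 * fabricates the CQE and returns the request to the block layer.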
1880 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1881 struct nvme_fc_fcp_op *op, u32 data_len,
1882 enum nvmefc_fcp_datadir io_dir)
1884 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1885 struct nvme_command *sqe = &cmdiu->sqe;
1890 * before attempting to send the io, check to see if we believe
1891 * the target device is present
1893 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
1894 return BLK_MQ_RQ_QUEUE_ERROR;
1896 if (!nvme_fc_ctrl_get(ctrl))
1897 return BLK_MQ_RQ_QUEUE_ERROR;
1899 /* format the FC-NVME CMD IU and fcp_req */
1900 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1901 csn = atomic_inc_return(&queue->csn);
1902 cmdiu->csn = cpu_to_be32(csn);
1903 cmdiu->data_len = cpu_to_be32(data_len);
1905 case NVMEFC_FCP_WRITE:
1906 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1908 case NVMEFC_FCP_READ:
1909 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1911 case NVMEFC_FCP_NODATA:
1915 op->fcp_req.payload_length = data_len;
1916 op->fcp_req.io_dir = io_dir;
1917 op->fcp_req.transferred_length = 0;
1918 op->fcp_req.rcv_rsplen = 0;
1919 op->fcp_req.status = NVME_SC_SUCCESS;
1920 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1923 * validate per fabric rules, set fields mandated by fabric spec
1924 * as well as those by FC-NVME spec.
1926 WARN_ON_ONCE(sqe->common.metadata);
1927 WARN_ON_ONCE(sqe->common.dptr.prp1);
1928 WARN_ON_ONCE(sqe->common.dptr.prp2);
1929 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1932 * format SQE DPTR field per FC-NVME rules
1933 * type=data block descr; subtype=offset;
1934 * offset is currently 0.
1936 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1937 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1938 sqe->rw.dptr.sgl.addr = 0;
1940 if (!(op->flags & FCOP_FLAGS_AEN)) {
1941 ret = nvme_fc_map_data(ctrl, op->rq, op);
1943 nvme_cleanup_cmd(op->rq);
1944 nvme_fc_ctrl_put(ctrl);
1945 return (ret == -ENOMEM || ret == -EAGAIN) ?
1946 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1950 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1951 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1953 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1955 if (!(op->flags & FCOP_FLAGS_AEN))
1956 blk_mq_start_request(op->rq);
1958 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1959 &ctrl->rport->remoteport,
1960 queue->lldd_handle, &op->fcp_req);
1963 if (op->rq) { /* normal request */
1964 nvme_fc_unmap_data(ctrl, op->rq, op);
1965 nvme_cleanup_cmd(op->rq);
1967 /* else - aen. no cleanup needed */
1969 nvme_fc_ctrl_put(ctrl);
1972 return BLK_MQ_RQ_QUEUE_ERROR;
1975 blk_mq_stop_hw_queues(op->rq->q);
1976 blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1978 return BLK_MQ_RQ_QUEUE_BUSY;
1981 return BLK_MQ_RQ_QUEUE_OK;
1985 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1986 const struct blk_mq_queue_data *bd)
1988 struct nvme_ns *ns = hctx->queue->queuedata;
1989 struct nvme_fc_queue *queue = hctx->driver_data;
1990 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1991 struct request *rq = bd->rq;
1992 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1993 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1994 struct nvme_command *sqe = &cmdiu->sqe;
1995 enum nvmefc_fcp_datadir io_dir;
1999 ret = nvme_setup_cmd(ns, rq, sqe);
2003 data_len = blk_rq_payload_bytes(rq);
2005 io_dir = ((rq_data_dir(rq) == WRITE) ?
2006 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2008 io_dir = NVMEFC_FCP_NODATA;
2010 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2013 static struct blk_mq_tags *
2014 nvme_fc_tagset(struct nvme_fc_queue *queue)
2016 if (queue->qnum == 0)
2017 return queue->ctrl->admin_tag_set.tags[queue->qnum];
2019 return queue->ctrl->tag_set.tags[queue->qnum - 1];
2023 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
2026 struct nvme_fc_queue *queue = hctx->driver_data;
2027 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2028 struct request *req;
2029 struct nvme_fc_fcp_op *op;
2031 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
2035 op = blk_mq_rq_to_pdu(req);
2037 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
2038 (ctrl->lport->ops->poll_queue))
2039 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
2040 queue->lldd_handle);
2042 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
2046 nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
2048 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2049 struct nvme_fc_fcp_op *aen_op;
2050 unsigned long flags;
2051 bool terminating = false;
2054 if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
2057 spin_lock_irqsave(&ctrl->lock, flags);
2058 if (ctrl->flags & FCCTRL_TERMIO)
2060 spin_unlock_irqrestore(&ctrl->lock, flags);
2065 aen_op = &ctrl->aen_ops[aer_idx];
2067 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2070 dev_err(ctrl->ctrl.device,
2071 "failed async event work [%d]\n", aer_idx);
2075 __nvme_fc_final_op_cleanup(struct request *rq)
2077 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2078 struct nvme_fc_ctrl *ctrl = op->ctrl;
2080 atomic_set(&op->state, FCPOP_STATE_IDLE);
2081 op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
2082 FCOP_FLAGS_COMPLETE);
2084 nvme_cleanup_cmd(rq);
2085 nvme_fc_unmap_data(ctrl, rq, op);
2086 nvme_complete_rq(rq);
2087 nvme_fc_ctrl_put(ctrl);
2092 nvme_fc_complete_rq(struct request *rq)
2094 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2095 struct nvme_fc_ctrl *ctrl = op->ctrl;
2096 unsigned long flags;
2097 bool completed = false;
2100 * the core layer, on controller resets after calling
2101 * nvme_shutdown_ctrl(), calls complete_rq without our
2102 * calling blk_mq_complete_request(), thus there may still
2103 * be live i/o outstanding with the LLDD. This means the transport has
2104 * to track complete calls vs fcpio_done calls to know what
2105 * path to take on completes and dones.
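 *
 * Concretely: if nvme_fc_fcpio_done() ran first it will have set
 * FCOP_FLAGS_COMPLETE, so this path does the final cleanup; otherwise
 * this path sets FCOP_FLAGS_RELEASED and the eventual fcpio_done call
 * performs the cleanup once the LLDD finally calls back.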
2107 spin_lock_irqsave(&ctrl->lock, flags);
2108 if (op->flags & FCOP_FLAGS_COMPLETE)
2111 op->flags |= FCOP_FLAGS_RELEASED;
2112 spin_unlock_irqrestore(&ctrl->lock, flags);
2115 __nvme_fc_final_op_cleanup(rq);
2119 * This routine is used by the transport when it needs to find active
2120 * io on a queue that is to be terminated. The transport uses
2121 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
2122 * this routine to kill them on a 1 by 1 basis.
2124 * As FC allocates FC exchange for each io, the transport must contact
2125 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2126 * After terminating the exchange the LLDD will call the transport's
2127 * normal io done path for the request, but it will have an aborted
2128 * status. The done path will return the io request back to the block
2129 * layer with an error status.
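 *
 * Typical usage (illustrative) from the association teardown path:
 *
 *	blk_mq_tagset_busy_iter(&ctrl->tag_set,
 *				nvme_fc_terminate_exchange, &ctrl->ctrl);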
2132 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2134 struct nvme_ctrl *nctrl = data;
2135 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2136 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2137 unsigned long flags;
2140 if (!blk_mq_request_started(req))
2143 spin_lock_irqsave(&ctrl->lock, flags);
2144 if (ctrl->flags & FCCTRL_TERMIO) {
2146 op->flags |= FCOP_FLAGS_TERMIO;
2148 spin_unlock_irqrestore(&ctrl->lock, flags);
2150 status = __nvme_fc_abort_op(ctrl, op);
2153 * if __nvme_fc_abort_op failed the io wasn't
2154 * active. Thus this call path is running in
2155 * parallel to the io complete. Treat as non-error.
2158 /* back out the flags/counters */
2159 spin_lock_irqsave(&ctrl->lock, flags);
2160 if (ctrl->flags & FCCTRL_TERMIO)
2162 op->flags &= ~FCOP_FLAGS_TERMIO;
2163 spin_unlock_irqrestore(&ctrl->lock, flags);
2169 static const struct blk_mq_ops nvme_fc_mq_ops = {
2170 .queue_rq = nvme_fc_queue_rq,
2171 .complete = nvme_fc_complete_rq,
2172 .init_request = nvme_fc_init_request,
2173 .exit_request = nvme_fc_exit_request,
2174 .reinit_request = nvme_fc_reinit_request,
2175 .init_hctx = nvme_fc_init_hctx,
2176 .poll = nvme_fc_poll,
2177 .timeout = nvme_fc_timeout,
2181 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2183 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2186 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2188 dev_info(ctrl->ctrl.device,
2189 "set_queue_count failed: %d\n", ret);
2193 ctrl->queue_count = opts->nr_io_queues + 1;
2194 if (!opts->nr_io_queues)
2197 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2198 opts->nr_io_queues);
2200 nvme_fc_init_io_queues(ctrl);
2202 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2203 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2204 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2205 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2206 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2207 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2208 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2210 sizeof(struct scatterlist)) +
2211 ctrl->lport->ops->fcprqst_priv_sz;
2212 ctrl->tag_set.driver_data = ctrl;
2213 ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2214 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2216 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2220 ctrl->ctrl.tagset = &ctrl->tag_set;
2222 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2223 if (IS_ERR(ctrl->ctrl.connect_q)) {
2224 ret = PTR_ERR(ctrl->ctrl.connect_q);
2225 goto out_free_tag_set;
2228 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2230 goto out_cleanup_blk_queue;
2232 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2234 goto out_delete_hw_queues;
2238 out_delete_hw_queues:
2239 nvme_fc_delete_hw_io_queues(ctrl);
2240 out_cleanup_blk_queue:
2241 nvme_stop_keep_alive(&ctrl->ctrl);
2242 blk_cleanup_queue(ctrl->ctrl.connect_q);
2244 blk_mq_free_tag_set(&ctrl->tag_set);
2245 nvme_fc_free_io_queues(ctrl);
2247 /* force put free routine to ignore io queues */
2248 ctrl->ctrl.tagset = NULL;
2254 nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
2256 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2259 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2261 dev_info(ctrl->ctrl.device,
2262 "set_queue_count failed: %d\n", ret);
2266 /* check for io queues existing */
2267 if (ctrl->queue_count == 1)
2270 dev_info(ctrl->ctrl.device, "Recreating %d I/O queues.\n",
2271 opts->nr_io_queues);
2273 nvme_fc_init_io_queues(ctrl);
2275 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
2277 goto out_free_io_queues;
2279 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2281 goto out_free_io_queues;
2283 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2285 goto out_delete_hw_queues;
2289 out_delete_hw_queues:
2290 nvme_fc_delete_hw_io_queues(ctrl);
2292 nvme_fc_free_io_queues(ctrl);
2297 * This routine restarts the controller on the host side, and
2298 * on the link side, recreates the controller association.
2301 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2303 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2308 ctrl->connect_attempts++;
2311 * Create the admin queue
2314 nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
2316 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2317 NVME_FC_AQ_BLKMQ_DEPTH);
2319 goto out_free_queue;
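/*
 * Issue the FC-NVME Create Association LS on the admin queue; the last
 * argument is the ERSP ratio hint (a quarter of the admin queue depth).
 */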
2321 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2322 NVME_FC_AQ_BLKMQ_DEPTH,
2323 (NVME_FC_AQ_BLKMQ_DEPTH / 4));
2325 goto out_delete_hw_queue;
2327 if (ctrl->ctrl.state != NVME_CTRL_NEW)
2328 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
2330 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
2332 goto out_disconnect_admin_queue;
2335 * Check controller capabilities
2337 * TODO: add code to check if ctrl attributes changed from
2338 * prior connection values
2341 ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2343 dev_err(ctrl->ctrl.device,
2344 "prop_get NVME_REG_CAP failed\n");
2345 goto out_disconnect_admin_queue;
2349 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2351 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2353 goto out_disconnect_admin_queue;
2355 segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2356 ctrl->lport->ops->max_sgl_segments);
2357 ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2359 ret = nvme_init_identify(&ctrl->ctrl);
2361 goto out_disconnect_admin_queue;
2365 /* FC-NVME does not have other data in the capsule */
2366 if (ctrl->ctrl.icdoff) {
2367 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2369 goto out_disconnect_admin_queue;
2372 nvme_start_keep_alive(&ctrl->ctrl);
2374 /* FC-NVME supports normal SGL Data Block Descriptors */
2376 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2377 /* warn if maxcmd is lower than queue_size */
2378 dev_warn(ctrl->ctrl.device,
2379 "queue_size %zu > ctrl maxcmd %u, reducing "
2381 opts->queue_size, ctrl->ctrl.maxcmd);
2382 opts->queue_size = ctrl->ctrl.maxcmd;
2385 ret = nvme_fc_init_aen_ops(ctrl);
2387 goto out_term_aen_ops;
2390 * Create the io queues
2393 if (ctrl->queue_count > 1) {
2394 if (ctrl->ctrl.state == NVME_CTRL_NEW)
2395 ret = nvme_fc_create_io_queues(ctrl);
2397 ret = nvme_fc_reinit_io_queues(ctrl);
2399 goto out_term_aen_ops;
2402 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2403 WARN_ON_ONCE(!changed);
2405 ctrl->connect_attempts = 0;
2407 kref_get(&ctrl->ctrl.kref);
2409 if (ctrl->queue_count > 1) {
2410 nvme_start_queues(&ctrl->ctrl);
2411 nvme_queue_scan(&ctrl->ctrl);
2412 nvme_queue_async_events(&ctrl->ctrl);
2415 return 0; /* Success */
2418 nvme_fc_term_aen_ops(ctrl);
2419 nvme_stop_keep_alive(&ctrl->ctrl);
2420 out_disconnect_admin_queue:
2421 /* send a Disconnect(association) LS to fc-nvme target */
2422 nvme_fc_xmt_disconnect_assoc(ctrl);
2423 out_delete_hw_queue:
2424 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2426 nvme_fc_free_queue(&ctrl->queues[0]);
2432 * This routine stops operation of the controller on the host side.
2433 * On the host OS stack side: admin and IO queues are stopped, and
2434 * outstanding ios on them are terminated via FC ABTS.
2435 * On the link side: the association is terminated.
2438 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2440 unsigned long flags;
2442 nvme_stop_keep_alive(&ctrl->ctrl);
2444 spin_lock_irqsave(&ctrl->lock, flags);
2445 ctrl->flags |= FCCTRL_TERMIO;
2447 spin_unlock_irqrestore(&ctrl->lock, flags);
2450 * If io queues are present, stop them and terminate all outstanding
2451 * ios on them. As FC allocates an FC exchange for each io, the
2452 * transport must contact the LLDD to terminate the exchange,
2453 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2454 * to tell us which ios are busy and invoke a transport routine
2455 * to kill them with the LLDD. After terminating the exchange
2456 * the LLDD will call the transport's normal io done path, but it
2457 * will have an aborted status. The done path will return the
2458 * io requests back to the block layer as part of normal completions
2459 * (but with error status).
2461 if (ctrl->queue_count > 1) {
2462 nvme_stop_queues(&ctrl->ctrl);
2463 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2464 nvme_fc_terminate_exchange, &ctrl->ctrl);
2468 * Other transports, which don't have link-level contexts bound
2469 * to sqes, would try to gracefully shut down the controller by
2470 * writing the registers for shutdown and polling (call
2471 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2472 * just aborted and we will wait on those contexts, and given
2473 * there was no indication of how live the controller is on the
2474 * link, don't send more io to create more contexts for the
2475 * shutdown. Let the controller fail via keepalive failure if
2476 * it's still present.
2480 * Clean up the admin queue. Same thing as above:
2481 * use blk_mq_tagset_busy_iter() and the transport routine to
2482 * terminate the exchanges.
2484 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2485 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2486 nvme_fc_terminate_exchange, &ctrl->ctrl);
2488 /* kill the AENs as they are a separate path */
2489 nvme_fc_abort_aen_ops(ctrl);
2491 /* wait for all io that had to be aborted */
2492 spin_lock_irqsave(&ctrl->lock, flags);
2493 while (ctrl->iocnt) {
2494 spin_unlock_irqrestore(&ctrl->lock, flags);
2496 spin_lock_irqsave(&ctrl->lock, flags);
2498 ctrl->flags &= ~FCCTRL_TERMIO;
2499 spin_unlock_irqrestore(&ctrl->lock, flags);
2501 nvme_fc_term_aen_ops(ctrl);
2504 * send a Disconnect(association) LS to fc-nvme target
2505 * Note: it could have been sent at the top of the process, but it
2506 * is cleaner on link traffic if sent after the aborts complete.
2507 * Note: if association doesn't exist, association_id will be 0
2509 if (ctrl->association_id)
2510 nvme_fc_xmt_disconnect_assoc(ctrl);
2512 if (ctrl->ctrl.tagset) {
2513 nvme_fc_delete_hw_io_queues(ctrl);
2514 nvme_fc_free_io_queues(ctrl);
2517 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2518 nvme_fc_free_queue(&ctrl->queues[0]);
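/*
 * Controller delete work item: cancels any pending reset/reconnect work,
 * tears down the association on the link, then drops the final nvme ctrl
 * reference so nvme_fc_nvme_ctrl_freed() releases the transport memory.
 */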
2522 nvme_fc_delete_ctrl_work(struct work_struct *work)
2524 struct nvme_fc_ctrl *ctrl =
2525 container_of(work, struct nvme_fc_ctrl, delete_work);
2527 cancel_work_sync(&ctrl->reset_work);
2528 cancel_delayed_work_sync(&ctrl->connect_work);
2531 * kill the association on the link side. This will block,
2532 * waiting for io to terminate
2534 nvme_fc_delete_association(ctrl);
2537 * tear down the controller
2538 * This will cause the last reference on the nvme ctrl to
2539 * expire, calling the transport nvme_fc_nvme_ctrl_freed() callback.
2540 * From there, the transport will tear down its logical queues and
2543 nvme_uninit_ctrl(&ctrl->ctrl);
2545 nvme_put_ctrl(&ctrl->ctrl);
2549 __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2551 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2554 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2561 * Request from nvme core layer to delete the controller
2564 nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2566 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2569 if (!kref_get_unless_zero(&ctrl->ctrl.kref))
2572 ret = __nvme_fc_del_ctrl(ctrl);
2575 flush_workqueue(nvme_fc_wq);
2577 nvme_put_ctrl(&ctrl->ctrl);
2583 nvme_fc_reset_ctrl_work(struct work_struct *work)
2585 struct nvme_fc_ctrl *ctrl =
2586 container_of(work, struct nvme_fc_ctrl, reset_work);
2589 /* will block while waiting for io to terminate */
2590 nvme_fc_delete_association(ctrl);
2592 ret = nvme_fc_create_association(ctrl);
2594 dev_warn(ctrl->ctrl.device,
2595 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
2597 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2598 dev_warn(ctrl->ctrl.device,
2599 "NVME-FC{%d}: Max reconnect attempts (%d) "
2600 "reached. Removing controller\n",
2601 ctrl->cnum, ctrl->connect_attempts);
2603 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2604 NVME_CTRL_DELETING)) {
2605 dev_err(ctrl->ctrl.device,
2606 "NVME-FC{%d}: failed to change state "
2607 "to DELETING\n", ctrl->cnum);
2611 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2615 dev_warn(ctrl->ctrl.device,
2616 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2617 ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
2618 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2619 ctrl->ctrl.opts->reconnect_delay * HZ);
2621 dev_info(ctrl->ctrl.device,
2622 "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
2626 * called by the nvme core layer, via the sysfs interface, to request
2627 * a reset of the nvme controller
2630 nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2632 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2634 dev_warn(ctrl->ctrl.device,
2635 "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
2637 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
2640 if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
2643 flush_work(&ctrl->reset_work);
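/*
 * nvme_ctrl_ops glue into the nvme core: register reads/writes are
 * emulated with fabrics Property Get/Set commands (the nvmf_reg_*
 * helpers), while reset and delete requests are funneled to the
 * FC-specific work items above.
 */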
2648 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2650 .module = THIS_MODULE,
2652 .reg_read32 = nvmf_reg_read32,
2653 .reg_read64 = nvmf_reg_read64,
2654 .reg_write32 = nvmf_reg_write32,
2655 .reset_ctrl = nvme_fc_reset_nvme_ctrl,
2656 .free_ctrl = nvme_fc_nvme_ctrl_freed,
2657 .submit_async_event = nvme_fc_submit_async_event,
2658 .delete_ctrl = nvme_fc_del_nvme_ctrl,
2659 .get_subsysnqn = nvmf_get_subsysnqn,
2660 .get_address = nvmf_get_address,
2664 nvme_fc_connect_ctrl_work(struct work_struct *work)
2668 struct nvme_fc_ctrl *ctrl =
2669 container_of(to_delayed_work(work),
2670 struct nvme_fc_ctrl, connect_work);
2672 ret = nvme_fc_create_association(ctrl);
2674 dev_warn(ctrl->ctrl.device,
2675 "NVME-FC{%d}: Reconnect attempt failed (%d)\n",
2677 if (ctrl->connect_attempts >= NVME_FC_MAX_CONNECT_ATTEMPTS) {
2678 dev_warn(ctrl->ctrl.device,
2679 "NVME-FC{%d}: Max reconnect attempts (%d) "
2680 "reached. Removing controller\n",
2681 ctrl->cnum, ctrl->connect_attempts);
2683 if (!nvme_change_ctrl_state(&ctrl->ctrl,
2684 NVME_CTRL_DELETING)) {
2685 dev_err(ctrl->ctrl.device,
2686 "NVME-FC{%d}: failed to change state "
2687 "to DELETING\n", ctrl->cnum);
2691 WARN_ON(!queue_work(nvme_fc_wq, &ctrl->delete_work));
2695 dev_warn(ctrl->ctrl.device,
2696 "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
2697 ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
2698 queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
2699 ctrl->ctrl.opts->reconnect_delay * HZ);
2701 dev_info(ctrl->ctrl.device,
2702 "NVME-FC{%d}: controller reconnect complete\n",
2707 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
2708 .queue_rq = nvme_fc_queue_rq,
2709 .complete = nvme_fc_complete_rq,
2710 .init_request = nvme_fc_init_admin_request,
2711 .exit_request = nvme_fc_exit_request,
2712 .reinit_request = nvme_fc_reinit_request,
2713 .init_hctx = nvme_fc_init_admin_hctx,
2714 .timeout = nvme_fc_timeout,
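/*
 * Allocate and wire up a new controller on the given lport/rport pair.
 * The admin tag set and admin request queue are created here, while the
 * I/O queues are deferred to nvme_fc_create_association() because the
 * usable I/O queue count is only known after talking to the controller.
 */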
2718 static struct nvme_ctrl *
2719 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2720 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2722 struct nvme_fc_ctrl *ctrl;
2723 unsigned long flags;
2726 if (!(rport->remoteport.port_role &
2727 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
2732 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2738 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2744 ctrl->ctrl.opts = opts;
2745 INIT_LIST_HEAD(&ctrl->ctrl_list);
2746 ctrl->lport = lport;
2747 ctrl->rport = rport;
2748 ctrl->dev = lport->dev;
2751 get_device(ctrl->dev);
2752 kref_init(&ctrl->ref);
2754 INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
2755 INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
2756 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
2757 spin_lock_init(&ctrl->lock);
2759 /* io queue count */
2760 ctrl->queue_count = min_t(unsigned int,
2762 lport->ops->max_hw_queues);
2763 opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
2764 ctrl->queue_count++; /* +1 for admin queue */
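/* sqsize is a 0's-based value per the NVMe spec, hence queue_size - 1 */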
2766 ctrl->ctrl.sqsize = opts->queue_size - 1;
2767 ctrl->ctrl.kato = opts->kato;
2770 ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2775 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
2776 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
2777 ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
2778 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
2779 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
2780 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2782 sizeof(struct scatterlist)) +
2783 ctrl->lport->ops->fcprqst_priv_sz;
2784 ctrl->admin_tag_set.driver_data = ctrl;
2785 ctrl->admin_tag_set.nr_hw_queues = 1;
2786 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
2788 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
2790 goto out_free_queues;
2792 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2793 if (IS_ERR(ctrl->ctrl.admin_q)) {
2794 ret = PTR_ERR(ctrl->ctrl.admin_q);
2795 goto out_free_admin_tag_set;
2799 * Would have been nice to init io queues tag set as well.
2800 * However, we require interaction from the controller
2801 * for max io queue count before we can do so.
2802 * Defer this to the connect path.
2805 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
2807 goto out_cleanup_admin_q;
2809 /* at this point, teardown path changes to ref counting on nvme ctrl */
2811 spin_lock_irqsave(&rport->lock, flags);
2812 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2813 spin_unlock_irqrestore(&rport->lock, flags);
2815 ret = nvme_fc_create_association(ctrl);
2817 ctrl->ctrl.opts = NULL;
2818 /* initiate nvme ctrl ref counting teardown */
2819 nvme_uninit_ctrl(&ctrl->ctrl);
2820 nvme_put_ctrl(&ctrl->ctrl);
2822 /* as we're past the point where we transition to the ref
2823 * counting teardown path, if we return a bad pointer here,
2824 * the calling routine, thinking it's prior to the
2825 * transition, will do an rport put. Since the teardown
2826 * path also does an rport put, we do an extra get here
2827 * so that proper order/teardown happens.
2829 nvme_fc_rport_get(rport);
2833 return ERR_PTR(ret);
2836 dev_info(ctrl->ctrl.device,
2837 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2838 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2842 out_cleanup_admin_q:
2843 blk_cleanup_queue(ctrl->ctrl.admin_q);
2844 out_free_admin_tag_set:
2845 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2847 kfree(ctrl->queues);
2849 put_device(ctrl->dev);
2850 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2854 /* exit via here doesn't follow ctrl ref points */
2855 return ERR_PTR(ret);
2860 FCT_TRADDR_WWNN = 1 << 0,
2861 FCT_TRADDR_WWPN = 1 << 1,
2864 struct nvmet_fc_traddr {
2869 static const match_table_t traddr_opt_tokens = {
2870 { FCT_TRADDR_WWNN, "nn-%s" },
2871 { FCT_TRADDR_WWPN, "pn-%s" },
2872 { FCT_TRADDR_ERR, NULL }
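/*
 * Parse a transport address of the form "nn-<wwnn>:pn-<wwpn>", where the
 * WWNN/WWPN values are 64-bit numbers, typically given in hex (e.g. a
 * hypothetical "nn-0x200000109b1234c5:pn-0x100000109b1234c5").
 */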
2876 nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2878 substring_t args[MAX_OPT_ARGS];
2879 char *options, *o, *p;
2883 options = o = kstrdup(buf, GFP_KERNEL);
2887 while ((p = strsep(&o, ":\n")) != NULL) {
2891 token = match_token(p, traddr_opt_tokens, args);
2893 case FCT_TRADDR_WWNN:
2894 if (match_u64(args, &token64)) {
2898 traddr->nn = token64;
2900 case FCT_TRADDR_WWPN:
2901 if (match_u64(args, &token64)) {
2905 traddr->pn = token64;
2908 pr_warn("unknown traddr token or missing value '%s'\n",
2920 static struct nvme_ctrl *
2921 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2923 struct nvme_fc_lport *lport;
2924 struct nvme_fc_rport *rport;
2925 struct nvme_ctrl *ctrl;
2926 struct nvmet_fc_traddr laddr = { 0L, 0L };
2927 struct nvmet_fc_traddr raddr = { 0L, 0L };
2928 unsigned long flags;
2931 ret = nvme_fc_parse_address(&raddr, opts->traddr);
2932 if (ret || !raddr.nn || !raddr.pn)
2933 return ERR_PTR(-EINVAL);
2935 ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2936 if (ret || !laddr.nn || !laddr.pn)
2937 return ERR_PTR(-EINVAL);
2939 /* find the host and remote ports to connect together */
2940 spin_lock_irqsave(&nvme_fc_lock, flags);
2941 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2942 if (lport->localport.node_name != laddr.nn ||
2943 lport->localport.port_name != laddr.pn)
2946 list_for_each_entry(rport, &lport->endp_list, endp_list) {
2947 if (rport->remoteport.node_name != raddr.nn ||
2948 rport->remoteport.port_name != raddr.pn)
2951 /* if we fail to get a reference, fall through; we will return an error */
2952 if (!nvme_fc_rport_get(rport))
2955 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2957 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
2959 nvme_fc_rport_put(rport);
2963 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2965 return ERR_PTR(-ENOENT);
2969 static struct nvmf_transport_ops nvme_fc_transport = {
2971 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2972 .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
2973 .create_ctrl = nvme_fc_create_ctrl,
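/*
 * Illustrative connect invocation (assuming current nvme-cli option
 * names; the address values below are placeholders, not real ports):
 *
 *   nvme connect --transport=fc \
 *        --traddr="nn-0x200000109b1234c5:pn-0x100000109b1234c5" \
 *        --host-traddr="nn-0x200000109b1234aa:pn-0x100000109b1234aa" \
 *        --nqn=<subsystem NQN>
 */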
2976 static int __init nvme_fc_init_module(void)
2980 nvme_fc_wq = create_workqueue("nvme_fc_wq");
2984 ret = nvmf_register_transport(&nvme_fc_transport);
2990 destroy_workqueue(nvme_fc_wq);
2994 static void __exit nvme_fc_exit_module(void)
2996 /* sanity check - all lports should be removed */
2997 if (!list_empty(&nvme_fc_lport_list))
2998 pr_warn("%s: localport list not empty\n", __func__);
3000 nvmf_unregister_transport(&nvme_fc_transport);
3002 destroy_workqueue(nvme_fc_wq);
3004 ida_destroy(&nvme_fc_local_port_cnt);
3005 ida_destroy(&nvme_fc_ctrl_cnt);
3008 module_init(nvme_fc_init_module);
3009 module_exit(nvme_fc_exit_module);
3011 MODULE_LICENSE("GPL v2");