lpfc: support nvmet_fc defer_rcv callback
author James Smart <jsmart2021@gmail.com>
Tue, 1 Aug 2017 22:12:40 +0000 (15:12 -0700)
committer Christoph Hellwig <hch@lst.de>
Thu, 10 Aug 2017 09:19:05 +0000 (11:19 +0200)
Currently, calls to nvmet_fc_rcv_fcp_req() always copy the
FC-NVME cmd iu to a temporary buffer before returning, allowing
the driver to immediately repost the buffer to the hardware.

To address timing conditions on queue element structures vs async
command reception, the nvmet_fc transport may occasionally need to
hold on to the command iu buffer for a short period. In these cases,
nvmet_fc_rcv_fcp_req() returns a special return code (-EOVERFLOW),
and the LLDD must not recycle the buffer back to the hardware until
the new defer_rcv LLDD callback is invoked.
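
For illustration, a minimal sketch of the receive-path decision an
LLDD makes under this contract; nvmet_fc_rcv_fcp_req() and the
.defer_rcv callback are the transport API, while repost_buffer(),
hold_buffer() and drop_and_repost() are hypothetical driver helpers:

	rc = nvmet_fc_rcv_fcp_req(tgtport, fcp_req, cmdiubuf, cmdiubuf_len);
	if (rc == 0) {
		/* transport copied the cmd iu: repost the buffer now */
		repost_buffer(buf);
	} else if (rc == -EOVERFLOW) {
		/* transport still references the cmd iu: hold the buffer
		 * and repost it from the .defer_rcv callback instead
		 */
		hold_buffer(buf);
	} else {
		/* hard failure: drop the command, recycle the buffer */
		drop_and_repost(buf);
	}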

This patch adds support for the new nvmet_fc transport defer_rcv
callback and recognition of the new error code when passing commands
to the transport.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_nvmet.h

index 4ed48ed38e79316f02ca1e299e56f66eea84ba8e..7ee1a94c0b33eefd57a6889df66649477ad4713b 100644 (file)
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf+len, PAGE_SIZE-len,
-                               "FCP: Rcv %08x Release %08x Drop %08x\n",
+                               "FCP: Rcv %08x Defer %08x Release %08x "
+                               "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_defer),
                                atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
index 5cc8b0f7d885fb0dfd5a00342f6ca72c72a40129..744f3f395b64852a294a9300adb64a496087aed7 100644 (file)
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
                                atomic_read(&tgtp->xmt_ls_rsp_error));
 
                len += snprintf(buf + len, size - len,
-                               "FCP: Rcv %08x Drop %08x\n",
+                               "FCP: Rcv %08x Defer %08x Release %08x "
+                               "Drop %08x\n",
                                atomic_read(&tgtp->rcv_fcp_cmd_in),
+                               atomic_read(&tgtp->rcv_fcp_cmd_defer),
+                               atomic_read(&tgtp->xmt_fcp_release),
                                atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
                if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
index fbeec344c6cc3be0bdd878db6bafda7353dcf901..bbbd0f84160d36563008a212afd8252f86ef15c8 100644 (file)
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+                    struct nvmefc_tgt_fcp_req *rsp)
+{
+       struct lpfc_nvmet_tgtport *tgtp;
+       struct lpfc_nvmet_rcv_ctx *ctxp =
+               container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+       struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+       struct lpfc_hba *phba = ctxp->phba;
+
+       lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+                        ctxp->oxid, ctxp->size, smp_processor_id());
+
+       tgtp = phba->targetport->private;
+       atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+       lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+       .defer_rcv      = lpfc_nvmet_defer_rcv,
 
        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                return;
        }
 
+       /* Processing of FCP command is deferred */
+       if (rc == -EOVERFLOW) {
+               lpfc_nvmeio_data(phba,
+                                "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+                                oxid, size, sid);
+               /* defer reposting rcv buffer till .defer_rcv callback */
+               ctxp->rqb_buffer = nvmebuf;
+               atomic_inc(&tgtp->rcv_fcp_cmd_out);
+               return;
+       }
+
        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
index e675ef17be08a0f67dd76f9d33a2ab831f961ce8..48a76788b003cb746afa45376272af362c4446ce 100644 (file)
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
        atomic_t rcv_fcp_cmd_in;
        atomic_t rcv_fcp_cmd_out;
        atomic_t rcv_fcp_cmd_drop;
+       atomic_t rcv_fcp_cmd_defer;
        atomic_t xmt_fcp_release;
 
        /* Stats counters - lpfc_nvmet_xmt_fcp_op */