/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
/**
 * lpfc_nvme_create_queue -
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nvme_qhandle *qhandle;
	char *str;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
	if (qhandle == NULL)
		return -ENOMEM;

	qhandle->cpu_id = smp_processor_id();
	qhandle->qidx = qidx;
	/*
	 * NVME qidx == 0 is the admin queue, so both admin queue
	 * and first IO queue will use MSI-X vector and associated
	 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
	 */
	if (qidx) {
		str = "IO ";  /* IO queue */
		qhandle->index = ((qidx - 1) %
			vport->phba->cfg_nvme_io_channel);
	} else {
		str = "ADM";  /* Admin queue */
		qhandle->index = qidx;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6073 Binding %s HdwQueue %d  (cpu %d) to "
			 "io_channel %d qhandle %p\n", str,
			 qidx, qhandle->cpu_id, qhandle->index, qhandle);
	*handle = (void *)qhandle;
	return 0;
}
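/* Worked example of the mapping above: with cfg_nvme_io_channel = 4,
 * IO-queue qidx 1..8 land on index 0,1,2,3,0,1,2,3, while the admin
 * queue (qidx 0) shares index 0 with the first IO queue.
 */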
/**
 * lpfc_nvme_delete_queue -
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   TODO: What are the failure codes.
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			"6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
			lport, qidx, handle);
	kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
	struct lpfc_nvme_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->lport_unreg_done);
}
/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct lpfc_nvme_rport *rport = remoteport->private;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	ndlp = rport->ndlp;
	if (!ndlp)
		goto rport_err;

	vport = ndlp->vport;
	if (!vport)
		goto rport_err;

	/* Remove this rport from the lport's list - memory is owned by the
	 * transport. Remove the ndlp reference for the NVME transport before
	 * calling state machine to remove the node, this is devloss = 0
	 * semantics.
	 */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6146 remoteport delete complete %p\n",
			 remoteport);
	list_del(&rport->list);
	ndlp->nrport = NULL;
	lpfc_nlp_put(ndlp);

 rport_err:
	/* This call has to execute as long as the rport is valid.
	 * Release any threads waiting for the unreg to complete.
	 */
	complete(&rport->rport_unreg_done);
}
static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_vport *vport = cmdwqe->vport;
	uint32_t status;
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;

	vport->phba->fc4NvmeLsCmpls++;

	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 nvme cmpl Enter "
			 "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p "
			 "bmp:%p ndlp:%p\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6046 nvme cmpl without done call back? "
				 "Data %p DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}
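/* Teardown above mirrors the setup in lpfc_nvme_gen_req(): the BPL
 * wrapper (context3) is freed, the transport's done() callback gets
 * the masked WQE status, the ndlp reference (context1) is dropped,
 * and the iocbq returns to the pool.
 */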
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde. */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

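	/* Note: first_len captures only the first BDE's size (the LS
	 * request payload); xmit_len keeps accumulating until a
	 * non-BDE64 entry ends the walk.
	 */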
	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

	/* Issue GEN REQ WQE for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->iotag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
	if (rc == WQE_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}
	return 0;
}
/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value:
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp;
	uint16_t ntype, nstate;

	/* there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is in
	 * them. And we do not have to look at the response data, we only care
	 * that we got a response. All of the caring is going to happen in the
	 * nvme-fc layer.
	 */

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6051 DID x%06x not an active rport.\n",
				 pnvme_rport->port_id);
		return -ENODEV;
	}

	/* The remote node has to be a mapped nvme target or an
	 * unmapped nvme initiator or it's an error.
	 */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6088 DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return -ENODEV;
	}
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6044 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return 2;
	}
	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
				 "6042 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		kfree(bmp);
		return 3;
	}
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

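	/* The BPL built above holds exactly two BDEs - one for the LS
	 * request, one for the response - which is why the call below
	 * passes num_entry = 2.
	 */
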
	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6149 ENTER.  lport %p, rport %p lsreq%p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	vport->phba->fc4NvmeLsRequests++;

	/* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
	 * This code allows it all to work.
	 */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
				ndlp, 2, 30, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6052 EXIT. issue ls wqe failed lport %p, "
				 "rport %p lsreq%p Status %x DID %x\n",
				 pnvme_lport, pnvme_rport, pnvme_lsreq,
				 ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	/* Stub in routine and return 0 for now. */
	return ret;
}
/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine to abort a link service request
 * previously issued by the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value:
 *   TODO: What are the failure codes.
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	LIST_HEAD(abort_list);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *wqe, *next_wqe;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
	if (!ndlp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
				 "6049 Could not find node for DID %x\n",
				 pnvme_rport->port_id);
		return;
	}

	/* Expand print to include key fields. */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
			 "rsplen:%d %pad %pad\n",
			 pnvme_lport, pnvme_rport,
			 pnvme_lsreq, pnvme_lsreq->rqstlen,
			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			 &pnvme_lsreq->rspdma);

	/*
	 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
	 * that need an ABTS.  The IOs need to stay on the txcmplq so that
	 * the abort operation completes them successfully.
	 */
	pring = phba->sli4_hba.nvmels_wq->pring;
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
		/* Add to abort_list on NDLP match. */
		if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
			wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
			list_add_tail(&wqe->dlist, &abort_list);
		}
	}
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Abort the targeted IOs and remove them from the abort list. */
	list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&wqe->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, wqe);
		spin_unlock_irq(&phba->hbalock);
	}
}
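/* The two-pass pattern above (collect matches under the ring lock,
 * then issue each ABTS under the hba lock) keeps lock hold times
 * short; the aborted IOs still complete through their normal
 * completion path while sitting on the txcmplq.
 */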
/* Fix up the existing sgls for NVME IO. */
static void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_nvme_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{
	struct sli4_sge *sgl;
	union lpfc_wqe128 *wqe;
	uint32_t *wptr, *dptr;

	/*
	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
	 * match NVME.  NVME sends 96 bytes. Also, use the
	 * nvme commands command and response dma addresses
	 * rather than the virtual memory to ease the restore
	 * operation.
	 */
	sgl = lpfc_ncmd->nvme_sgl;
	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);

	sgl++;

	/* Setup the physical region for the FCP RSP */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
	sgl->word2 = le32_to_cpu(sgl->word2);
	if (nCmd->sg_cnt)
		bf_set(lpfc_sli4_sge_last, sgl, 0);
	else
		bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(nCmd->rsplen);

	/*
	 * Get a local pointer to the built-in wqe and correct
	 * the cmd size to match NVME's 96 bytes and fix
	 * the dma address.
	 */

	/* 128 byte wqe support here */
	wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;

	/* Word 0-2 - NVME CMND IU (embedded payload) */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
	wqe->generic.bde.tus.f.bdeSize = 60;
	wqe->generic.bde.addrHigh = 0;
	wqe->generic.bde.addrLow = 64;  /* Word 16 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       (nCmd->rsplen + nCmd->cmdlen));

	/* Word 10 */
	bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/*
	 * Embed the payload in the last half of the WQE
	 * WQE words 16-30 get the NVME CMD IU payload
	 *
	 * WQE words 16-19 get payload Words 1-4
	 * WQE words 20-21 get payload Words 6-7
	 * WQE words 22-29 get payload Words 16-23
	 */
	wptr = &wqe->words[16];  /* WQE ptr */
	dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
	dptr++;			/* Skip Word 0 in payload */

	*wptr++ = *dptr++;	/* Word 1 */
	*wptr++ = *dptr++;	/* Word 2 */
	*wptr++ = *dptr++;	/* Word 3 */
	*wptr++ = *dptr++;	/* Word 4 */
	dptr++;			/* Skip Word 5 in payload */
	*wptr++ = *dptr++;	/* Word 6 */
	*wptr++ = *dptr++;	/* Word 7 */
	dptr += 8;		/* Skip Words 8-15 in payload */
	*wptr++ = *dptr++;	/* Word 16 */
	*wptr++ = *dptr++;	/* Word 17 */
	*wptr++ = *dptr++;	/* Word 18 */
	*wptr++ = *dptr++;	/* Word 19 */
	*wptr++ = *dptr++;	/* Word 20 */
	*wptr++ = *dptr++;	/* Word 21 */
	*wptr++ = *dptr++;	/* Word 22 */
	*wptr   = *dptr;	/* Word 23 */
}
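/* The copy loop above packs NVME SQE words 1-4, 6-7 and 16-23 into
 * WQE words 16 and up, per the mapping in the comment block; with
 * wqe_wqes set, the hardware takes the CMD IU from the WQE itself
 * rather than DMA-ing it from host memory (an assumption about the
 * embedded-payload behavior).
 */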
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
		struct lpfc_nvme_buf *lpfc_ncmd)
{
	uint64_t seg1, seg2, seg3, seg4;

	if (!lpfc_ncmd->ts_last_cmd ||
	    !lpfc_ncmd->ts_cmd_start ||
	    !lpfc_ncmd->ts_cmd_wqput ||
	    !lpfc_ncmd->ts_isr_cmpl ||
	    !lpfc_ncmd->ts_data_nvme)
		return;
	if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
		return;
	if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
		return;
	if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
		return;
	if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
		return;
	/*
	 * Segment 1 - Time from Last FCP command cmpl is handed
	 * off to NVME Layer to start of next command.
	 * Segment 2 - Time from Driver receives a IO cmd start
	 * from NVME Layer to WQ put is done on IO cmd.
	 * Segment 3 - Time from Driver WQ put is done on IO cmd
	 * to MSI-X ISR for IO cmpl.
	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
	 * cmpl is handed off to the NVME Layer.
	 */
	seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
	if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
		return;

	/* Calculate times relative to start of IO */
	seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
	seg3 = (lpfc_ncmd->ts_isr_cmpl -
		lpfc_ncmd->ts_cmd_start) - seg2;
	seg4 = (lpfc_ncmd->ts_data_nvme -
		lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
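	/* By construction seg2 + seg3 + seg4 == ts_data_nvme - ts_cmd_start,
	 * i.e. the segments partition the in-driver life of the IO; the
	 * min/max/total accumulators below let the debugfs side derive
	 * per-segment averages.
	 */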
	phba->ktime_data_samples++;
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;
	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;
	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;
	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	lpfc_ncmd->ts_last_cmd = 0;
	lpfc_ncmd->ts_cmd_start = 0;
	lpfc_ncmd->ts_cmd_wqput = 0;
	lpfc_ncmd->ts_isr_cmpl = 0;
	lpfc_ncmd->ts_data_nvme = 0;
}
#endif
/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 *
 * Driver registers this routine as its io completion handler.  This
 * routine completes the fcp WQE described by @wcqe and returns the
 * results to the nvme_fc transport.
 *
 * Return value:
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvme_buf *lpfc_ncmd =
		(struct lpfc_nvme_buf *)pwqeIn->context1;
	struct lpfc_vport *vport = pwqeIn->vport;
	struct nvmefc_fcp_req *nCmd;
	struct nvme_fc_ersp_iu *ep;
	struct nvme_fc_cmd_iu *cp;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	uint32_t code;
	uint16_t cid, sqhd, data;
	uint32_t *ptr;

	/* Sanity check on return of outstanding command */
	if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6071 Completion pointers bad on wqe %p.\n",
				 wcqe);
		return;
	}
	phba->fc4NvmeIoCmpls++;

	nCmd = lpfc_ncmd->nvmeCmd;
	rport = lpfc_ncmd->nrport;

	lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6061 rport %p, DID x%06x node not ready.\n",
				 rport, rport->remoteport->port_id);

		ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6062 Ignoring NVME cmpl.  No ndlp\n");
			goto out_err;
		}
	}

	code = bf_get(lpfc_wcqe_c_code, wcqe);
	if (code == CQE_CODE_NVME_ERSP) {
		/* For this type of CQE, we need to rebuild the rsp */
		ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

		/*
		 * Get Command Id from cmd to plug into response. This
		 * code is not needed in the next NVME Transport drop.
		 */
		cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
		cid = cp->sqe.common.command_id;

		/*
		 * RSN is in CQE word 2
		 * SQHD is in CQE Word 3 bits 15:0
		 * Cmd Specific info is in CQE Word 1
		 * and in CQE Word 0 bits 15:0
		 */
		sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

		/* Now lets build the NVME ERSP IU */
		ep->iu_len = cpu_to_be16(8);
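		/* iu_len counts 32-bit words, so 8 here describes the
		 * 32-byte ERSP IU (an assumption about the FC-NVMe IU
		 * length units).
		 */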
		ep->rsn = wcqe->parameter;
		ep->xfrd_len = cpu_to_be32(nCmd->payload_length);

		ptr = (uint32_t *)&ep->cqe.result.u64;
		*ptr++ = wcqe->total_data_placed;
		data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
		*ptr = (uint32_t)data;
		ep->cqe.sq_head = sqhd;
		ep->cqe.sq_id = nCmd->sqid;
		ep->cqe.command_id = cid;

		lpfc_ncmd->status = IOSTAT_SUCCESS;
		lpfc_ncmd->result = 0;
		nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
		nCmd->transferred_length = nCmd->payload_length;
	} else {
		lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
				     LPFC_IOCB_STATUS_MASK);
		lpfc_ncmd->result = wcqe->parameter;

		/* For NVME, the only failure path that results in an
		 * IO error is when the adapter rejects it.  All other
		 * conditions are a success case and resolved by the
		 * transport.
		 * IOSTAT_FCP_RSP_ERROR means:
		 * 1. Length of data received doesn't match total
		 *    transfer length in WQE
		 * 2. If the RSP payload does NOT match these cases:
		 *    a. RSP length 12/24 bytes and all zeros
		 *    b. NVME ERSP
		 */
		switch (lpfc_ncmd->status) {
		case IOSTAT_SUCCESS:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = 0;
			nCmd->status = 0;
			break;
		case IOSTAT_FCP_RSP_ERROR:
			nCmd->transferred_length = wcqe->total_data_placed;
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;
			/* Sanity check */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
				break;
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6081 NVME Completion Protocol Error: "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			break;
		default:
out_err:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6072 NVME Completion Error: "
					 "status x%x result x%x placed x%x\n",
					 lpfc_ncmd->status, lpfc_ncmd->result,
					 wcqe->total_data_placed);
			nCmd->transferred_length = 0;
			nCmd->rcv_rsplen = 0;
			nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
		}
	}

	/* pick up SLI4 exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
	else
		lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
		atomic_dec(&ndlp->cmd_pending);

	/* Update stats and complete the IO.  There is
	 * no need for dma unprep because the nvme_transport
	 * owns the dma address.
	 */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
		lpfc_ncmd->ts_data_nvme = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
		lpfc_nvme_ktime(phba, lpfc_ncmd);
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		if (lpfc_ncmd->cpu != smp_processor_id())
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6701 CPU Check cmpl: "
					 "cpu %d expect %d\n",
					 smp_processor_id(), lpfc_ncmd->cpu);
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
	}
#endif
	nCmd->done(nCmd);

	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_ncmd->nrport = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value:
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
	uint32_t req_len;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return -EINVAL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	wqe->fcp_iwrite.initial_xfer_len = 0;
	if (nCmd->sg_cnt) {
		if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
			/* Word 5 */
			if ((phba->cfg_nvme_enable_fb) &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				req_len = lpfc_ncmd->nvmeCmd->payload_length;
				if (req_len < pnode->nvme_fb_size)
					wqe->fcp_iwrite.initial_xfer_len =
						req_len;
				else
					wqe->fcp_iwrite.initial_xfer_len =
						pnode->nvme_fb_size;
			}

			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IWRITE64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_IOD_WRITE);
			bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_WRITE_CMD);

			phba->fc4NvmeOutputRequests++;
		} else {
			/* Word 7 */
			bf_set(wqe_cmnd, &wqe->generic.wqe_com,
			       CMD_FCP_IREAD64_WQE);
			bf_set(wqe_pu, &wqe->generic.wqe_com,
			       PARM_READ_CHECK);

			/* Word 10 */
			bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
			bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_IOD_READ);
			bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
			       LPFC_WQE_LENLOC_WORD4);
			if (phba->cfg_nvme_oas)
				bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);

			/* Word 11 */
			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
			       NVME_READ_CMD);

			phba->fc4NvmeInputRequests++;
		}
	} else {
		/* Word 4 */
		wqe->fcp_icmd.rsrvd4 = 0;

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
		bf_set(wqe_pu, &wqe->generic.wqe_com, 0);

		/* Word 10 */
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);

		phba->fc4NvmeControlRequests++;
	}
	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
	 */

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	/* Preserve Class data in the ndlp. */
	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	pwqeq->vport = vport;
	return 0;
}
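/* In short: sg_cnt != 0 yields a data-bearing IWRITE64 (with optional
 * first-burst length) or IREAD64 WQE; sg_cnt == 0 yields an ICMND64
 * WQE with no data phase.  Words 6-11 common to all three flavors are
 * filled at the end.
 */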
/**
 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value:
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_nvme_buf *lpfc_ncmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
	union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
	struct scatterlist *data_sg;
	struct sli4_sge *first_data_sgl;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i;

	/* Fix up the command and response DMA stuff. */
	lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (nCmd->sg_cnt) {
		/*
		 * Jump over the cmd and rsp SGEs.  The fix routine
		 * has already adjusted for this.
		 */
		sgl += 2;

		first_data_sgl = sgl;
		lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
		if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6058 Too many sg segments from "
					"NVME Transport.  Max %d, "
					"nvmeIO sg_cnt %d\n",
					phba->cfg_sg_seg_cnt,
					lpfc_ncmd->seg_cnt);
			lpfc_ncmd->seg_cnt = 0;
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single nvme command.  Just run through the seg_cnt and format
		 * the sge's.
		 */
		nseg = nCmd->sg_cnt;
		data_sg = nCmd->first_sgl;
		for (i = 0; i < nseg; i++) {
			if (data_sg == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6059 dptr err %d, nseg %d\n",
						i, nseg);
				lpfc_ncmd->seg_cnt = 0;
				return 1;
			}
			physaddr = data_sg->dma_address;
			dma_len = data_sg->length;
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			data_sg = sg_next(data_sg);
			sgl++;
			num_bde++;
		}
	} else {
		/* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
		 */
		if (nCmd->payload_length != 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6063 NVME DMA Prep Err: sg_cnt %d "
					"payload_length x%x\n",
					nCmd->sg_cnt, nCmd->payload_length);
			return 1;
		}
	}

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of WQE here
	 */
	wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
	return 0;
}
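/* fcp_iread, fcp_iwrite and fcp_icmd overlay the same union, so
 * setting word 4 through fcp_iread covers every command flavor
 * (an assumption based on the union lpfc_wqe128 layout).
 */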
/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.
 *
 * Return value:
 *   0 - Success
 *   TODO: What are the failure codes.
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{
	int ret = 0;
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_qhandle *lpfc_queue_info;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0;
#endif

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	vport = lport->vport;
	phba = vport->phba;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		start = ktime_get_ns();
#endif
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6053 rport %p, ndlp %p, DID x%06x "
				 "ndlp not ready.\n",
				 rport, ndlp, pnvme_rport->port_id);

		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
					 "6066 Missing node for DID %x\n",
					 pnvme_rport->port_id);
			ret = -ENODEV;
			goto out_fail;
		}
	}

	/* The remote node has to be a mapped target or it's an error. */
	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
				 "6036 rport %p, DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 rport, pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		ret = -ENODEV;
		goto out_fail;
	}

	/* The node is shared with FCP IO, make sure the IO pending count does
	 * not exceed the programmed depth.
	 */
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
		ret = -EAGAIN;
		goto out_fail;
	}

	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
	if (lpfc_ncmd == NULL) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
				 "6065 driver's buffer pool is empty, "
				 "IO failed\n");
		ret = -ENOMEM;
		goto out_fail;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		lpfc_ncmd->ts_cmd_start = start;
		lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
	}
#endif

	/*
	 * Store the data needed by the driver to issue, abort, and complete
	 * an IO.
	 * Do not let the IO hang out forever.  There is no midlayer issuing
	 * an abort so inform the FW of the maximum IO pending time.
	 */
	pnvme_fcreq->private = (void *)lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->nrport = rport;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->start_time = jiffies;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_nvme_buf;
	}

	atomic_inc(&ndlp->cmd_pending);

	/*
	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
	 * routine. The driver now is dependent on the IO queue steering from
	 * the transport.  We are trusting the upper NVME layers know which
	 * index to use and that they have affinitized a CPU to this hardware
	 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
	 */
	lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;

	lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
			 lpfc_ncmd->cur_iocbq.sli4_xritag,
			 lpfc_queue_info->index, ndlp->nlp_DID);

	ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
	if (ret) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
				 "6113 FCP could not issue WQE err %x "
				 "sid: x%x did: x%x oxid: x%x\n",
				 ret, vport->fc_myDID, ndlp->nlp_DID,
				 lpfc_ncmd->cur_iocbq.sli4_xritag);
		ret = -EBUSY;
		goto out_free_nvme_buf;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on)
		lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();

	if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
		lpfc_ncmd->cpu = smp_processor_id();
		if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
			/* Check for admin queue */
			if (lpfc_queue_info->qidx) {
				lpfc_printf_vlog(vport,
						 KERN_ERR, LOG_NVME_IOERR,
						 "6702 CPU Check cmd: "
						 "cpu %d wq %d\n",
						 lpfc_ncmd->cpu,
						 lpfc_queue_info->index);
			}
			lpfc_ncmd->cpu = lpfc_queue_info->index;
		}
		if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
	}
#endif
	return 0;

 out_free_nvme_buf:
	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 out_fail:
	return ret;
}
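/* Submit path in brief: validate the ndlp, enforce the shared FCP/NVME
 * queue depth, take a buffer from the driver pool, build the WQE
 * (lpfc_nvme_prep_io_cmd) and its data SGEs (lpfc_nvme_prep_io_dma),
 * then issue on the hardware queue the transport selected via the
 * qhandle.
 */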
/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"req_tag x%x, status x%x, hwstatus x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
			bf_get(lpfc_wcqe_c_status, abts_cmpl),
			bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @lpfc_pnvme: Pointer to the driver's nvme instance data
 * @lpfc_nvme_lport: Pointer to the driver's local port data
 * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
 * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
 * data structure to the rport indicated in @lpfc_nvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{
	struct lpfc_nvme_lport *lport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nvme_rport *rport;
	struct lpfc_nvme_buf *lpfc_nbuf;
	struct lpfc_iocbq *abts_buf;
	struct lpfc_iocbq *nvmereq_wqe;
	union lpfc_wqe *abts_wqe;
	unsigned long flags;
	int ret_val;

	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
	vport = lport->vport;
	phba = vport->phba;

	/* Announce entry to new IO submit field. */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
			 "6002 Abort Request to rport DID x%06x "
			 "for nvme_fc_req %p\n",
			 pnvme_rport->port_id,
			 pnvme_fcreq);

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	ndlp = rport->ndlp;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
				 "6054 rport %p, ndlp %p, DID x%06x ndlp "
				 "not ready.\n",
				 rport, ndlp, pnvme_rport->port_id);

		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
					 "6055 Could not find node for "
					 "DID %x\n",
					 pnvme_rport->port_id);
			return;
		}
	}

	/* The remote node has to be ready to send an abort. */
	if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
	    !(ndlp->nlp_type & NLP_NVME_TARGET)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
				 "6048 rport %p, DID x%06x not ready for "
				 "IO. State x%x, Type x%x\n",
				 rport, pnvme_rport->port_id,
				 ndlp->nlp_state, ndlp->nlp_type);
		return;
	}

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6139 Driver in reset cleanup - flushing "
				 "NVME Req now.  hba_flag x%x\n",
				 phba->hba_flag);
		return;
	}

	lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private;
	if (!lpfc_nbuf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6140 NVME IO req has no matching lpfc nvme "
				 "io buffer.  Skipping abort req.\n");
		return;
	} else if (!lpfc_nbuf->nvmeCmd) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6141 lpfc NVME IO req has no nvme_fcreq "
				 "io buffer.  Skipping abort req.\n");
		return;
	}
	nvmereq_wqe = &lpfc_nbuf->cur_iocbq;

	/*
	 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
	 * state must match the nvme_fcreq passed by the nvme
	 * transport.  If they don't match, it is likely the driver
	 * has already completed the NVME IO and the nvme transport
	 * has not seen it yet.
	 */
	if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6143 NVME req mismatch: "
				 "lpfc_nbuf %p nvmeCmd %p, "
				 "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
				 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Don't abort IOs no longer on the pending queue. */
	if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6142 NVME IO req %p not queued - skipping "
				 "abort req xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
			 nvmereq_wqe->sli4_xritag,
			 nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);

	/* Outstanding abort is in progress */
	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6144 Outstanding NVME I/O Abort Request "
				 "still pending on nvme_fcreq %p, "
				 "lpfc_ncmd %p xri x%x\n",
				 pnvme_fcreq, lpfc_nbuf,
				 nvmereq_wqe->sli4_xritag);
		return;
	}

	abts_buf = __lpfc_sli_get_iocbq(phba);
	if (!abts_buf) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6136 No available abort wqes. Skipping "
				 "Abts req for nvme_fcreq %p xri x%x\n",
				 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
		return;
	}

	/* Ready - mark outstanding as aborted by driver. */
	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Complete prepping the abort wqe and issue to the FW. */
	abts_wqe = &abts_buf->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       nvmereq_wqe->iocb.ulpClass);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_buf->iotag);

	/* word 10 */
	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_buf->iocb_flag |= LPFC_IO_NVME;
	abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
	abts_buf->vport = vport;
	abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (ret_val == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6137 Failed abts issue_wqe with status x%x "
				 "for nvme_fcreq %p.\n",
				 ret_val, pnvme_fcreq);
		lpfc_sli_release_iocbq(phba, abts_buf);
		return;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6138 Transport Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 nvmereq_wqe->sli4_xritag,
			 abts_buf->iotag);
}
/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
	/* initiator-based functions */
	.localport_delete  = lpfc_nvme_localport_delete,
	.remoteport_delete = lpfc_nvme_remoteport_delete,
	.create_queue = lpfc_nvme_create_queue,
	.delete_queue = lpfc_nvme_delete_queue,
	.ls_req       = lpfc_nvme_ls_req,
	.fcp_io       = lpfc_nvme_fcp_io_submit,
	.ls_abort     = lpfc_nvme_ls_abort,
	.fcp_abort    = lpfc_nvme_fcp_abort,

	.max_hw_queues = 1,
	.max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* Sizes of additional private data for data structures.
	 * No use for the last two sizes at this time.
	 */
	.local_priv_sz = sizeof(struct lpfc_nvme_lport),
	.remote_priv_sz = sizeof(struct lpfc_nvme_rport),
	.lsrqst_priv_sz = 0,
	.fcprqst_priv_sz = 0,
};
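/* This template is what the driver hands to the nvme_fc transport when
 * it registers its localport (in the localport-create path, which lies
 * outside this section); the transport then drives IO through the
 * callbacks above.
 */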
/**
 * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme sgl pages from an
 * NVME buffer list @nblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 *
 **/
static int
lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
			      struct list_head *nblist,
			      int count)
{
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
				SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
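/* Sizing note: with a 4 KB SLI4 page, the reqlen check above caps a
 * single non-embedded post at roughly (4096 - header) /
 * sizeof(struct sgl_page_pairs) buffers, which is presumably why the
 * list-post code below blocks at LPFC_NEMBED_MBOX_SGL_CNT entries.
 */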
/**
 * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_nblist passed in
 * must be a local list, thus no lock is needed when manipulating the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_nblist, int sb_count)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;

	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(phba,
						lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* failure, put on abort nvme list */
					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
				} else {
					/* success, put on NVME buffer list */
					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_nvme_buf, list);
			if (status) {
				/* failure, put on abort nvme list */
				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
			} else {
				/* success, put on NVME buffer list */
				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}

	/* Push NVME buffers with sgl posted to the available list */
	while (!list_empty(&nvme_nblist)) {
		list_remove_head(&nvme_nblist, lpfc_ncmd,
				 struct lpfc_nvme_buf, list);
		lpfc_release_nvme_buf(phba, lpfc_ncmd);
	}
	return num_posted;
}
/**
 * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post.  This is needed after a
 * pci_function_reset/warm_start or start.  The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_nvme_buf_list.  If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers that need to be reposted to a local list */
	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
						 phba->sli4_hba.nvme_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
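
/*
 * Editor's note (a sketch, not driver code): the drain above follows the
 * driver's one lock order -- nvme_buf_list_get_lock first, then
 * nvme_buf_list_put_lock -- and moves both lists onto a private list so the
 * SGL posting runs with no pool lock held.  The generic shape, with
 * hypothetical names (consumer_lock/producer_lock and their lists):
 *
 *	LIST_HEAD(local);
 *
 *	spin_lock_irq(&consumer_lock);
 *	spin_lock(&producer_lock);
 *	list_splice_init(&consumer_list, &local);
 *	list_splice_init(&producer_list, &local);
 *	spin_unlock(&producer_lock);
 *	spin_unlock_irq(&consumer_lock);
 *
 * Everything on "local" is then private to the caller and can be walked
 * without further locking.
 */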
/**
 * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates nvme buffers for a device with the SLI-4 interface
 * spec.  The nvme buffer contains all the necessary information needed to
 * initiate an NVME I/O.  After allocating up to @num_to_alloc NVME buffers
 * and putting them on a list, it posts them to the port by using SGL block
 * post.
 *
 * Return codes:
 *   int - number of nvme buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvme_buf *lpfc_ncmd;
	struct lpfc_iocbq *pwqeq;
	union lpfc_wqe128 *wqe;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(post_nblist);
	LIST_HEAD(nvme_nblist);

	sgl_size = phba->cfg_sg_dma_buf_size;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
		if (!lpfc_ncmd)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
						 GFP_KERNEL,
						 &lpfc_ncmd->dma_handle);
		if (!lpfc_ncmd->data) {
			kfree(lpfc_ncmd);
			break;
		}
		memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			break;
		}
		pwqeq = &(lpfc_ncmd->cur_iocbq);
		wqe = (union lpfc_wqe128 *)&pwqeq->wqe;

		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, pwqeq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
			kfree(lpfc_ncmd);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6121 Failed to allocate IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		pwqeq->sli4_lxritag = lxri;
		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		pwqeq->iocb_flag |= LPFC_IO_NVME;
		pwqeq->context1 = lpfc_ncmd;
		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;

		/* Initialize local short-hand pointers. */
		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
		sgl = lpfc_ncmd->nvme_sgl;
		pdma_phys_sgl = lpfc_ncmd->dma_handle;
		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;

		/* Rsp SGE will be filled in when we rcv an IO
		 * from the NVME Layer to be sent.
		 * The cmd is going to be embedded so we need a SKIP SGE.
		 */
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		/* Fill in word 3 / sgl_len during cmd submission */

		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;

		/* Word 7 */
		bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
		/* NVME upper layers will time things out, if needed */
		bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);

		/* Word 10 */
		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);

		/* add the nvme buffer to a post list */
		list_add_tail(&lpfc_ncmd->list, &post_nblist);
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		phba->sli4_hba.nvme_xri_cnt++;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6114 Allocated %d out of %d requested new NVME "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist))
		num_posted = lpfc_post_nvme_sgl_list(phba,
						     &post_nblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
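
/*
 * Editor's sketch (illustration only): each allocation step in the loop
 * above undoes its predecessors in reverse order when it fails.  The inline
 * cleanup is equivalent to the usual goto-unwind shape; "buf" and the
 * labels here are hypothetical:
 *
 *	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		break;
 *	buf->data = pci_pool_alloc(pool, GFP_KERNEL, &buf->dma_handle);
 *	if (!buf->data)
 *		goto free_buf;
 *	lxri = lpfc_sli4_next_xritag(phba);
 *	if (lxri == NO_XRI)
 *		goto free_dma;
 *	iotag = lpfc_sli_next_iotag(phba, &buf->cur_iocbq);
 *	if (iotag == 0)
 *		goto free_xri;
 *	(success path continues)
 *
 * free_xri:
 *	lpfc_sli4_free_xri(phba, lxri);
 * free_dma:
 *	pci_pool_free(pool, buf->data, buf->dma_handle);
 * free_buf:
 *	kfree(buf);
 *
 * Keeping the unwind order the exact mirror of the acquisition order is
 * what prevents an XRI or DMA buffer leak on a partial failure.
 */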
/**
 * lpfc_get_nvme_buf - Get an nvme buffer from lpfc_nvme_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes an nvme buffer from the head of @phba's
 * lpfc_nvme_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &phba->lpfc_nvme_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock(&phba->nvme_buf_list_put_lock);
		list_splice(&phba->lpfc_nvme_buf_list_put,
			    &phba->lpfc_nvme_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
		spin_unlock(&phba->nvme_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &phba->lpfc_nvme_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_ncmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
	if (!found)
		return NULL;
	return lpfc_ncmd;
}
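
/*
 * Editor's note (a sketch, not driver code): the two-list pool keeps IO
 * completion, which only takes nvme_buf_list_put_lock, off the allocation
 * path.  Only when the get list runs dry does the consumer pay for both
 * locks and splice the put list over.  Generic shape, with the hypothetical
 * helper try_take() standing in for the RRQ-filtered list walk above:
 *
 *	spin_lock_irqsave(&get_lock, flags);
 *	entry = try_take(&get_list);
 *	if (!entry) {
 *		spin_lock(&put_lock);
 *		list_splice_init(&put_list, &get_list);
 *		spin_unlock(&put_lock);
 *		entry = try_take(&get_list);
 *	}
 *	spin_unlock_irqrestore(&get_lock, flags);
 */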
/**
 * lpfc_release_nvme_buf: Return an nvme buffer back to hba nvme buf list.
 * @phba: The HBA for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases @lpfc_ncmd nvme buffer by adding it to the tail of
 * @phba's lpfc_nvme_buf_list.  For SLI4, XRI's are tied to the nvme buffer
 * and cannot be reused for at least RA_TOV amount of time if the buffer
 * was aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
{
	unsigned long iflag = 0;

	lpfc_ncmd->nonsg_phys = 0;
	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
				  iflag);
		lpfc_ncmd->nvmeCmd = NULL;
		list_add_tail(&lpfc_ncmd->list,
			      &phba->sli4_hba.lpfc_abts_nvme_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
	} else {
		lpfc_ncmd->nvmeCmd = NULL;
		lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
	}
}
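
/*
 * Editor's sketch (illustration only): the LPFC_SBUF_XBUSY test above is one
 * edge in the buffer/XRI lifecycle; the abts list is drained by
 * lpfc_sli4_nvme_xri_aborted() below once the port reports the XRI free.
 * With hypothetical state names:
 *
 *	INFLIGHT -- normal completion ---------------> FREE (put list)
 *	INFLIGHT -- aborted, LPFC_SBUF_XBUSY set ----> PARKED (abts list)
 *	PARKED --- XRI_ABORTED CQE, RRQ started -----> FREE (put list)
 *
 * While the RRQ is active (on the order of RA_TOV), lpfc_get_nvme_buf()
 * skips the buffer for that remote port, so the XRI is not reused too soon.
 */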
/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport.  It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time.  Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * become known.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - no heap memory available
 *	other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
	int ret = 0;
	struct lpfc_hba *phba = vport->phba;
	struct nvme_fc_port_info nfcp_info;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	int len;

	/* Initialize this localport instance.  The vport wwn usage ensures
	 * that NPIV is accounted for.
	 */
	memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);

	/* For now need + 1 to get around NVME transport logic */
	lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
	lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;

	/* localport is allocated from the stack, but the registration
	 * call allocates heap memory as well as the private area.
	 */
#if (IS_ENABLED(CONFIG_NVME_FC))
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev,
					 &localport);
#else
	ret = -ENOMEM;
#endif
	if (!ret) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
				 "6005 Successfully registered local "
				 "NVME port num %d, localP %p, private %p, "
				 "sg_seg %d\n",
				 localport->port_num, localport,
				 localport->private,
				 lpfc_nvme_template.max_sgl_segments);

		/* Private is our lport size declared in the template. */
		lport = (struct lpfc_nvme_lport *)localport->private;
		vport->localport = localport;
		lport->vport = vport;
		INIT_LIST_HEAD(&lport->rport_list);
		vport->nvmei_support = 1;
		len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
		vport->phba->total_nvme_bufs += len;
	}

	return ret;
}
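
/*
 * Editor's note (a sketch, not the transport's actual layout): the private
 * pointer works because lpfc_nvme_template declares a local_priv_sz of
 * sizeof(struct lpfc_nvme_lport).  The transport allocates the localport
 * and the private area as one heap object, roughly:
 *
 *	struct xport_lport {
 *		(transport-internal fields)
 *		struct nvme_fc_local_port localport;
 *		(local_priv_sz bytes follow; localport.private points here)
 *	};
 *
 * so the driver may treat localport->private as its lport for the lifetime
 * of the registration, and must stop using it once the unregister upcall
 * completes.
 */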
/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to the lpfc vport instance.
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there.  This routine ensures all rports bound to the
 * lport have been disconnected.
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
	int ret;

	if (vport->nvmei_support == 0)
		return;

	localport = vport->localport;
	vport->localport = NULL;
	lport = (struct lpfc_nvme_lport *)localport->private;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6011 Destroying NVME localport %p\n",
			 localport);

	list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
		/* The last node ref has to get released now before the rport
		 * private memory area is released by the transport.
		 */
		list_del(&rport->list);

		init_completion(&rport->rport_unreg_done);
		ret = nvme_fc_unregister_remoteport(rport->remoteport);
		if (ret)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6008 rport fail destroy %x\n", ret);
		/* give the transport up to 5 seconds to call back */
		wait_for_completion_timeout(&rport->rport_unreg_done,
					    msecs_to_jiffies(5000));
	}

	/* lport's rport list is clear.  Unregister
	 * lport and release resources.
	 */
	init_completion(&lport->lport_unreg_done);
	ret = nvme_fc_unregister_localport(localport);
	wait_for_completion_timeout(&lport->lport_unreg_done,
				    msecs_to_jiffies(5000));

	/* Regardless of the unregister upcall response, clear
	 * nvmei_support.  All rports are unregistered and the
	 * driver will clean up.
	 */
	vport->nvmei_support = 0;
	if (!ret)
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6009 Unregistered lport Success\n");
	else
		lpfc_printf_vlog(vport,
				 KERN_INFO, LOG_NVME_DISC,
				 "6010 Unregistered lport "
				 "Failed, status x%x\n",
				 ret);
#endif
}
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;

	localport = vport->localport;
	if (!localport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6710 Update NVME fail. No localport\n");
		return;
	}
	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
				 "6171 Update NVME fail. localP %p, No lport\n",
				 localport);
		return;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6012 Update NVME lport %p did x%x\n",
			 localport, vport->fc_myDID);

	localport->port_id = vport->fc_myDID;
	if (localport->port_id == 0)
		localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
	else
		localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6030 bound lport %p to DID x%06x\n",
			 lport, localport->port_id);
#endif
}
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret = 0;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remote_port;
	struct nvme_fc_port_info rpinfo;

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
			 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_type);

	localport = vport->localport;
	lport = (struct lpfc_nvme_lport *)localport->private;

	if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {

		/* The driver isn't expecting the rport wwn to change
		 * but it might get a different DID on a different
		 * fabric.
		 */
		list_for_each_entry(rport, &lport->rport_list, list) {
			if (rport->remoteport->port_name !=
			    wwn_to_u64(ndlp->nlp_portname.u.wwn))
				continue;
			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
					 "6035 lport %p, found matching rport "
					 "at wwpn 0x%llx, Data: x%x x%x x%x "
					 "x%06x\n",
					 lport,
					 rport->remoteport->port_name,
					 rport->remoteport->port_id,
					 rport->remoteport->port_role,
					 ndlp->nlp_type,
					 ndlp->nlp_DID);
			remote_port = rport->remoteport;
			if ((remote_port->port_id == 0) &&
			    (remote_port->port_role ==
			     FC_PORT_ROLE_NVME_DISCOVERY)) {
				remote_port->port_id = ndlp->nlp_DID;
				remote_port->port_role &=
					~FC_PORT_ROLE_NVME_DISCOVERY;
				if (ndlp->nlp_type & NLP_NVME_TARGET)
					remote_port->port_role |=
						FC_PORT_ROLE_NVME_TARGET;
				if (ndlp->nlp_type & NLP_NVME_INITIATOR)
					remote_port->port_role |=
						FC_PORT_ROLE_NVME_INITIATOR;

				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
						 LOG_NVME_DISC,
						 "6014 Rebinding lport to "
						 "rport wwpn 0x%llx, "
						 "Data: x%x x%x x%x x%06x\n",
						 remote_port->port_name,
						 remote_port->port_id,
						 remote_port->port_role,
						 ndlp->nlp_type,
						 ndlp->nlp_DID);
			}
			return 0;
		}

		/* NVME rports are not preserved across devloss.
		 * Just register this instance.
		 */
		rpinfo.port_id = ndlp->nlp_DID;
		rpinfo.port_role = 0;
		if (ndlp->nlp_type & NLP_NVME_TARGET)
			rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
			rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
		rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
		rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
		ret = nvme_fc_register_remoteport(localport, &rpinfo,
						  &remote_port);
		if (!ret) {
			rport = remote_port->private;
			rport->remoteport = remote_port;
			rport->lport = lport;
			rport->ndlp = lpfc_nlp_get(ndlp);

			ndlp->nrport = rport;
			INIT_LIST_HEAD(&rport->list);
			list_add_tail(&rport->list, &lport->rport_list);
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_NVME_DISC | LOG_NODE,
					 "6022 Binding new rport to lport %p "
					 "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
					 "DID x%06x Role x%x\n",
					 lport,
					 rpinfo.node_name, rpinfo.port_name,
					 rpinfo.port_id, rpinfo.port_role);
		} else {
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_NVME_DISC | LOG_NODE,
					 "6031 RemotePort Registration failed "
					 "err: %d, DID x%06x\n",
					 ret, ndlp->nlp_DID);
		}
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
				 "6027 Unknown nlp_type x%x on DID x%06x "
				 "ndlp %p. Not Registering nvme rport\n",
				 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
	}
	return ret;
#else
	return 0;
#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued.  Changes are planned for later.
 *
 * Notes - the ndlp reference count is not decremented here since
 * there is no nvme_transport api for devloss.  Node ref count is
 * only adjusted in driver unload.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
	int ret;
	struct nvme_fc_local_port *localport;
	struct lpfc_nvme_lport *lport;
	struct lpfc_nvme_rport *rport;
	struct nvme_fc_remote_port *remoteport;
	unsigned long wait_tmo;

	localport = vport->localport;

	/* This is a fundamental error.  The localport is always
	 * available until driver unload.  Just exit.
	 */
	if (!localport)
		return;

	lport = (struct lpfc_nvme_lport *)localport->private;
	if (!lport)
		goto input_err;

	rport = ndlp->nrport;
	if (!rport)
		goto input_err;

	remoteport = rport->remoteport;
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6033 Unreg nvme remoteport %p, portname x%llx, "
			 "port_id x%06x, portstate x%x port type x%x\n",
			 remoteport, remoteport->port_name,
			 remoteport->port_id, remoteport->port_state,
			 ndlp->nlp_type);

	/* Sanity check ndlp type.  Only call for NVME ports. Don't
	 * clear any rport state until the transport calls back.
	 */
	if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
		init_completion(&rport->rport_unreg_done);
		ret = nvme_fc_unregister_remoteport(remoteport);
		if (ret != 0)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6167 NVME unregister failed %d "
					 "port_state x%x\n",
					 ret, remoteport->port_state);

		/* Wait for the driver's delete completion routine to finish
		 * before proceeding.  This guarantees the transport and driver
		 * have completed the unreg process.
		 */
		wait_tmo = msecs_to_jiffies(5000);
		ret = wait_for_completion_timeout(&rport->rport_unreg_done,
						  wait_tmo);
		if (ret == 0)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
					 "6169 Unreg nvme wait timeout\n");
	}
	return;

 input_err:
#endif
	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
			 "6168 State error: lport %p, rport %p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}
/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvme xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 list) {
		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
			list_del(&lpfc_ncmd->list);
			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_nvme_buf_list_lock);

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			ndlp = lpfc_ncmd->ndlp;
			if (ndlp) {
				lpfc_set_rrq_active(
					phba, ndlp,
					lpfc_ncmd->cur_iocbq.sli4_lxritag,
					rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_nvme_buf(phba, lpfc_ncmd);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
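
/*
 * Editor's note (a sketch, not driver code): the loop above uses the
 * "unlock inside the match arm" shape -- once the matching entry is
 * unlinked, both locks are dropped before calling into code that may take
 * other locks, and the function returns without touching the list again.
 * Generic form, hypothetical names:
 *
 *	spin_lock_irqsave(&outer, flags);
 *	spin_lock(&inner);
 *	list_for_each_entry_safe(p, n, &lst, list) {
 *		if (p->tag != tag)
 *			continue;
 *		list_del(&p->list);
 *		spin_unlock(&inner);
 *		spin_unlock_irqrestore(&outer, flags);
 *		handle(p);
 *		return;
 *	}
 *	spin_unlock(&inner);
 *	spin_unlock_irqrestore(&outer, flags);
 */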