git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
[SCSI] lpfc 8.3.12: Emulex SLI enhancements
authorJames Smart <james.smart@emulex.com>
Tue, 6 Apr 2010 18:48:51 +0000 (14:48 -0400)
committerJames Bottomley <James.Bottomley@suse.de>
Sun, 11 Apr 2010 18:44:44 +0000 (13:44 -0500)
- Add the new Logical Link speed event support.
- Add RATOV and EDTOV to the REG_VFI mailbox command.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h

index 6c71ea416634ecc8519c2c247211423181887abe..6ba1f307ccce97e6b4a880be356eb6993cb8752b 100644 (file)
@@ -1500,6 +1500,7 @@ typedef struct {          /* FireFly BIU registers */
 #define MBXERR_BAD_RCV_LENGTH       14
 #define MBXERR_DMA_ERROR            15
 #define MBXERR_ERROR                16
+#define MBXERR_UNKNOWN_CMD          18
 #define MBXERR_LINK_DOWN            0x33
 #define MBX_NOT_FINISHED           255
 
index bff98add80cd06f1ddf7188805e571aabc52c0f3..bbdcf96800f619e952bdcb64df043bf789caf68e 100644 (file)
@@ -787,6 +787,7 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_EQ_DESTROY            0x37
 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG          0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET                0x3D
+#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT         0x5A
 
 /* FCoE Opcodes */
 #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE                        0x01
@@ -1108,6 +1109,39 @@ struct lpfc_mbx_mq_create {
        } u;
 };
 
+struct lpfc_mbx_mq_create_ext {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_mq_create_ext_num_pages_SHIFT         0
+#define lpfc_mbx_mq_create_ext_num_pages_MASK          0x0000FFFF
+#define lpfc_mbx_mq_create_ext_num_pages_WORD          word0
+                       uint32_t async_evt_bmap;
+#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT    LPFC_TRAILER_CODE_LINK
+#define lpfc_mbx_mq_create_ext_async_evt_link_MASK     0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_link_WORD     async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT  LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK   0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD   async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT  LPFC_TRAILER_CODE_GRP5
+#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK   0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD   async_evt_bmap
+                       struct mq_context context;
+                       struct dma_address page[LPFC_MAX_MQ_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT  0
+#define lpfc_mbx_mq_create_q_id_MASK   0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD   word0
+               } response;
+       } u;
+#define LPFC_ASYNC_EVENT_LINK_STATE    0x2
+#define LPFC_ASYNC_EVENT_FCF_STATE     0x4
+#define LPFC_ASYNC_EVENT_GROUP5                0x20
+};
+
 struct lpfc_mbx_mq_destroy {
        struct mbox_header header;
        union {
@@ -1434,8 +1468,8 @@ struct lpfc_mbx_reg_vfi {
 #define lpfc_reg_vfi_fcfi_WORD         word2
        uint32_t wwn[2];
        struct ulp_bde64 bde;
-       uint32_t word8_rsvd;
-       uint32_t word9_rsvd;
+       uint32_t e_d_tov;
+       uint32_t r_a_tov;
        uint32_t word10;
 #define lpfc_reg_vfi_nport_id_SHIFT            0
 #define lpfc_reg_vfi_nport_id_MASK             0x00FFFFFF
@@ -2048,6 +2082,7 @@ struct lpfc_mqe {
                struct lpfc_mbx_reg_fcfi reg_fcfi;
                struct lpfc_mbx_unreg_fcfi unreg_fcfi;
                struct lpfc_mbx_mq_create mq_create;
+               struct lpfc_mbx_mq_create_ext mq_create_ext;
                struct lpfc_mbx_eq_create eq_create;
                struct lpfc_mbx_cq_create cq_create;
                struct lpfc_mbx_wq_create wq_create;
@@ -2106,6 +2141,7 @@ struct lpfc_mcqe {
 #define LPFC_TRAILER_CODE_LINK 0x1
 #define LPFC_TRAILER_CODE_FCOE 0x2
 #define LPFC_TRAILER_CODE_DCBX 0x3
+#define LPFC_TRAILER_CODE_GRP5 0x5
 };
 
 struct lpfc_acqe_link {
@@ -2175,6 +2211,19 @@ struct lpfc_acqe_dcbx {
        uint32_t trailer;
 };
 
+struct lpfc_acqe_grp5 {
+       uint32_t word0;
+#define lpfc_acqe_grp5_pport_SHIFT     0
+#define lpfc_acqe_grp5_pport_MASK      0x000000FF
+#define lpfc_acqe_grp5_pport_WORD      word0
+       uint32_t word1;
+#define lpfc_acqe_grp5_llink_spd_SHIFT 16
+#define lpfc_acqe_grp5_llink_spd_MASK  0x0000FFFF
+#define lpfc_acqe_grp5_llink_spd_WORD  word1
+       uint32_t event_tag;
+       uint32_t trailer;
+};
+
 /*
  * Define the bootstrap mailbox (bmbx) region used to communicate
  * mailbox command between the host and port. The mailbox consists
index 56421c714bf84cabd38af838fe7d383ed145b839..8341d44fe87b50a02ca05e0386b0b19900ba6e08 100644 (file)
@@ -3525,6 +3525,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
                        "handled yet\n");
 }
 
+/**
+ * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
+ * is an asynchronous notification of a logical link speed change.  The Port
+ * reports the logical link speed in units of 10Mbps.
+ **/
+static void
+lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
+                        struct lpfc_acqe_grp5 *acqe_grp5)
+{
+       uint16_t prev_ll_spd;
+
+       phba->fc_eventTag = acqe_grp5->event_tag;
+       phba->fcoe_eventtag = acqe_grp5->event_tag;
+       prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
+       phba->sli4_hba.link_state.logical_speed =
+               (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "2789 GRP5 Async Event: Updating logical link speed "
+                       "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
+                       (phba->sli4_hba.link_state.logical_speed*10));
+}
+
 /**
  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
  * @phba: pointer to lpfc hba data structure.
@@ -3561,6 +3587,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
                        lpfc_sli4_async_dcbx_evt(phba,
                                                 &cq_event->cqe.acqe_dcbx);
                        break;
+               case LPFC_TRAILER_CODE_GRP5:
+                       lpfc_sli4_async_grp5_evt(phba,
+                                                &cq_event->cqe.acqe_grp5);
+                       break;
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "1804 Invalid asynchrous event code: "
index a6b7f5a0210ba4797c49e6ea8b9a0cb523d54157..f9b056ec6186d4165c97cc09b2b7de981c830298 100644 (file)
@@ -1899,6 +1899,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
        memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
        reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
        reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
+       reg_vfi->e_d_tov = vport->phba->fc_edtov;
+       reg_vfi->r_a_tov = vport->phba->fc_ratov;
        reg_vfi->bde.addrHigh = putPaddrHigh(phys);
        reg_vfi->bde.addrLow = putPaddrLow(phys);
        reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
index 2f7018821531e2443d69906a097859d93df00ced..2c88999b7095ceae6786cf4be254e0ecdd871fa5 100644 (file)
@@ -9833,10 +9833,71 @@ out:
        return status;
 }
 
+/**
+ * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @mbox: An allocated pointer to type LPFC_MBOXQ_t
+ * @cq: The completion queue to associate with this mq.
+ *
+ * This function provides failback (fb) functionality when the
+ * mq_create_ext fails on older FW generations.  Its purpose is identical
+ * to mq_create_ext otherwise.
+ *
+ * This routine cannot fail as all attributes were previously accessed and
+ * initialized in mq_create_ext.
+ **/
+static void
+lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
+                      LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
+{
+       struct lpfc_mbx_mq_create *mq_create;
+       struct lpfc_dmabuf *dmabuf;
+       int length;
+
+       length = (sizeof(struct lpfc_mbx_mq_create) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_MQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       mq_create = &mbox->u.mqe.un.mq_create;
+       bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+              mq->page_count);
+       bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+              cq->queue_id);
+       bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+       switch (mq->entry_count) {
+       case 16:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_16);
+               break;
+       case 32:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_32);
+               break;
+       case 64:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_64);
+               break;
+       case 128:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_128);
+               break;
+       }
+       list_for_each_entry(dmabuf, &mq->page_list, list) {
+               mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                       putPaddrLow(dmabuf->phys);
+               mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                       putPaddrHigh(dmabuf->phys);
+       }
+}
+
 /**
  * lpfc_mq_create - Create a mailbox Queue on the HBA
  * @phba: HBA structure that indicates port to create a queue on.
  * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
+ * @subtype: The queue's subtype.
  *
  * This function creates a mailbox queue, as detailed in @mq, on a port,
  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
@@ -9852,31 +9913,40 @@ out:
  * memory this function will return ENOMEM. If the queue create mailbox command
  * fails this function will return ENXIO.
  **/
-uint32_t
+int32_t
 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
               struct lpfc_queue *cq, uint32_t subtype)
 {
        struct lpfc_mbx_mq_create *mq_create;
+       struct lpfc_mbx_mq_create_ext *mq_create_ext;
        struct lpfc_dmabuf *dmabuf;
        LPFC_MBOXQ_t *mbox;
        int rc, length, status = 0;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
 
+
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
-       length = (sizeof(struct lpfc_mbx_mq_create) -
+       length = (sizeof(struct lpfc_mbx_mq_create_ext) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
-                        LPFC_MBOX_OPCODE_MQ_CREATE,
+                        LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
                         length, LPFC_SLI4_MBX_EMBED);
-       mq_create = &mbox->u.mqe.un.mq_create;
-       bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+
+       mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+       bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
                    mq->page_count);
-       bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
-                   cq->queue_id);
-       bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+       bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
+              1);
+       bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+              &mq_create_ext->u.request, 1);
+       bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
+              &mq_create_ext->u.request, 1);
+       bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+              cq->queue_id);
+       bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
        switch (mq->entry_count) {
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -9886,31 +9956,46 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
                        return -EINVAL;
                /* otherwise default to smallest count (drop through) */
        case 16:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
                       LPFC_MQ_CNT_16);
                break;
        case 32:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
                       LPFC_MQ_CNT_32);
                break;
        case 64:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
                       LPFC_MQ_CNT_64);
                break;
        case 128:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
                       LPFC_MQ_CNT_128);
                break;
        }
        list_for_each_entry(dmabuf, &mq->page_list, list) {
-               mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+               mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
                                        putPaddrLow(dmabuf->phys);
-               mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+               mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
                                        putPaddrHigh(dmabuf->phys);
        }
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
+       mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+                             &mq_create_ext->u.response);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2795 MQ_CREATE_EXT failed with "
+                               "status x%x. Failback to MQ_CREATE.\n",
+                               rc);
+               lpfc_mq_create_fb_init(phba, mq, mbox, cq);
+               mq_create = &mbox->u.mqe.un.mq_create;
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+               shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+               mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
+                                     &mq_create->u.response);
+       }
+
        /* The IOCTL status is embedded in the mailbox subheader. */
-       shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
@@ -9921,7 +10006,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
                status = -ENXIO;
                goto out;
        }
-       mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
        if (mq->queue_id == 0xFFFF) {
                status = -ENXIO;
                goto out;
index 54a5e0bc827f1afb6f7de67a37e0439a628e247b..e3792151ca064a3d207216a907589186782705a0 100644 (file)
@@ -36,6 +36,7 @@ struct lpfc_cq_event {
                struct lpfc_acqe_link           acqe_link;
                struct lpfc_acqe_fcoe           acqe_fcoe;
                struct lpfc_acqe_dcbx           acqe_dcbx;
+               struct lpfc_acqe_grp5           acqe_grp5;
                struct lpfc_rcqe                rcqe_cmpl;
                struct sli4_wcqe_xri_aborted    wcqe_axri;
                struct lpfc_wcqe_complete       wcqe_cmpl;
index 5b6cb9742c58948ed49a1770d8bb281655b54ece..58bb4c81b54e1698edde6aa89320a768393b8d5f 100644 (file)
@@ -493,8 +493,8 @@ void lpfc_sli4_queue_free(struct lpfc_queue *);
 uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
 uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
                        struct lpfc_queue *, uint32_t, uint32_t);
-uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
-                       struct lpfc_queue *, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+                      struct lpfc_queue *, uint32_t);
 uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
                        struct lpfc_queue *, uint32_t);
 uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,