Merge remote-tracking branch 'scsi/for-next'
author Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 12:59:32 +0000 (14:59 +0200)
committer Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 12:59:32 +0000 (14:59 +0200)
69 files changed:
MAINTAINERS
drivers/scsi/BusLogic.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/esas2r/esas2r.h
drivers/scsi/esas2r/esas2r_disc.c
drivers/scsi/esas2r/esas2r_flash.c
drivers/scsi/esas2r/esas2r_init.c
drivers/scsi/esas2r/esas2r_int.c
drivers/scsi/esas2r/esas2r_io.c
drivers/scsi/esas2r/esas2r_ioctl.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/esas2r/esas2r_targdb.c
drivers/scsi/esas2r/esas2r_vda.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_isr.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/fnic_stats.h [new file with mode: 0644]
drivers/scsi/fnic/fnic_trace.c
drivers/scsi/fnic/fnic_trace.h
drivers/scsi/hpsa.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_ctl.h
drivers/scsi/pm8001/pm8001_defs.h
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_hwi.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/pm8001/pm80xx_hwi.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_inline.h
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
include/scsi/iscsi_if.h
include/scsi/scsi_transport_iscsi.h

index 40cf7cd5fa0e6f35368649dee282d802b6b0da92..13a631bb788fdb604d69af172427cc2a73c2c6f2 100644 (file)
@@ -1872,7 +1872,7 @@ S:        Supported
 F:     drivers/net/wireless/brcm80211/
 
 BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
-M:     Bhanu Prakash Gollapudi <bprakash@broadcom.com>
+M:     Eddie Wai <eddie.wai@broadcom.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/bnx2fc/
index 757eb0716d45d4f273519ca2e2dd2ed03486a4a8..972f8176665fd07d5b9c6bee25cea3e8e9286fc1 100644 (file)
@@ -26,8 +26,8 @@
 
 */
 
-#define blogic_drvr_version            "2.1.16"
-#define blogic_drvr_date               "18 July 2002"
+#define blogic_drvr_version            "2.1.17"
+#define blogic_drvr_date               "12 September 2013"
 
 #include <linux/module.h>
 #include <linux/init.h>
@@ -311,12 +311,14 @@ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
   caller.
 */
 
-static void blogic_dealloc_ccb(struct blogic_ccb *ccb)
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
 {
        struct blogic_adapter *adapter = ccb->adapter;
 
-       scsi_dma_unmap(ccb->command);
-       pci_unmap_single(adapter->pci_device, ccb->sensedata,
+       if (ccb->command != NULL)
+               scsi_dma_unmap(ccb->command);
+       if (dma_unmap)
+               pci_unmap_single(adapter->pci_device, ccb->sensedata,
                         ccb->sense_datalen, PCI_DMA_FROMDEVICE);
 
        ccb->command = NULL;
@@ -2762,8 +2764,8 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
                        /*
                           Place CCB back on the Host Adapter's free list.
                         */
-                       blogic_dealloc_ccb(ccb);
-#if 0                          /* this needs to be redone different for new EH */
+                       blogic_dealloc_ccb(ccb, 1);
+#if 0                  /* this needs to be redone different for new EH */
                        /*
                           Bus Device Reset CCBs have the command field
                           non-NULL only when a Bus Device Reset was requested
@@ -2791,7 +2793,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
                                if (ccb->status == BLOGIC_CCB_RESET &&
                                                ccb->tgt_id == tgt_id) {
                                        command = ccb->command;
-                                       blogic_dealloc_ccb(ccb);
+                                       blogic_dealloc_ccb(ccb, 1);
                                        adapter->active_cmds[tgt_id]--;
                                        command->result = DID_RESET << 16;
                                        command->scsi_done(command);
@@ -2862,7 +2864,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
                        /*
                           Place CCB back on the Host Adapter's free list.
                         */
-                       blogic_dealloc_ccb(ccb);
+                       blogic_dealloc_ccb(ccb, 1);
                        /*
                           Call the SCSI Command Completion Routine.
                         */
@@ -3034,6 +3036,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
        int buflen = scsi_bufflen(command);
        int count;
        struct blogic_ccb *ccb;
+       dma_addr_t sense_buf;
 
        /*
           SCSI REQUEST_SENSE commands will be executed automatically by the
@@ -3179,10 +3182,17 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
        }
        memcpy(ccb->cdb, cdb, cdblen);
        ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
-       ccb->sensedata = pci_map_single(adapter->pci_device,
+       ccb->command = command;
+       sense_buf = pci_map_single(adapter->pci_device,
                                command->sense_buffer, ccb->sense_datalen,
                                PCI_DMA_FROMDEVICE);
-       ccb->command = command;
+       if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
+               blogic_err("DMA mapping for sense data buffer failed\n",
+                               adapter);
+               blogic_dealloc_ccb(ccb, 0);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+       ccb->sensedata = sense_buf;
        command->scsi_done = comp_cb;
        if (blogic_multimaster_type(adapter)) {
                /*
@@ -3203,7 +3213,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
                        if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
                                                ccb)) {
                                blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
-                               blogic_dealloc_ccb(ccb);
+                               blogic_dealloc_ccb(ccb, 1);
                                command->result = DID_ERROR << 16;
                                command->scsi_done(command);
                        }
@@ -3337,7 +3347,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
 
        for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
                if (ccb->status == BLOGIC_CCB_ACTIVE)
-                       blogic_dealloc_ccb(ccb);
+                       blogic_dealloc_ccb(ccb, 1);
        /*
         * Wait a few seconds between the Host Adapter Hard Reset which
         * initiates a SCSI Bus Reset and issuing any SCSI Commands.  Some
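For reference, a minimal sketch of the pci_map_single()/dma_mapping_error() check that the BusLogic hunks above introduce, assuming a generic ~3.12-era PCI SCSI low-level driver; my_map_sense() and its surrounding context are illustrative, not part of the patch:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Map a command's sense buffer for device-to-host DMA and verify the mapping. */
static int my_map_sense(struct pci_dev *pdev, struct scsi_cmnd *cmd,
			dma_addr_t *sense_dma)
{
	*sense_dma = pci_map_single(pdev, cmd->sense_buffer,
				    SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);

	/* pci_map_single() can fail; the address must be checked before use. */
	if (dma_mapping_error(&pdev->dev, *sense_dma))
		return SCSI_MLQUEUE_HOST_BUSY;	/* have the midlayer retry later */

	return 0;
}

Returning SCSI_MLQUEUE_HOST_BUSY, as the hunk does, makes the SCSI midlayer requeue the command instead of failing it outright.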
index 777e7c0bbb4b327d6624a2d771a78fa5033d432c..2e28f6c419fe253231864707225e5a09b68e033d 100644 (file)
@@ -128,7 +128,7 @@ struct be_ctrl_info {
 
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout            120000 /* 5s timeout */
+#define mcc_timeout            120000 /* 12s timeout */
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size)                               \
index e66aa7c11a8a21e041f8956c6ed0f758401c8420..3338391b64de9b1aba50844c014f7e5fc44706b3 100644 (file)
@@ -17,9 +17,9 @@
 
 #include <scsi/iscsi_proto.h>
 
+#include "be_main.h"
 #include "be.h"
 #include "be_mgmt.h"
-#include "be_main.h"
 
 int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
 {
@@ -158,8 +158,10 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
        struct be_cmd_resp_hdr *ioctl_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
-       if (beiscsi_error(phba))
+       if (beiscsi_error(phba)) {
+               free_mcc_tag(&phba->ctrl, tag);
                return -EIO;
+       }
 
        /* wait for the mccq completion */
        rc = wait_event_interruptible_timeout(
@@ -173,7 +175,11 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                            BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
                            BEISCSI_LOG_CONFIG,
                            "BC_%d : MBX Cmd Completion timed out\n");
-               rc = -EAGAIN;
+               rc = -EBUSY;
+
+               /* decrement the mccq used count */
+               atomic_dec(&phba->ctrl.mcc_obj.q.used);
+
                goto release_mcc_tag;
        } else
                rc = 0;
@@ -208,10 +214,18 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
 
                if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
                        ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
-                       if (ioctl_resp_hdr->response_length)
-                               goto release_mcc_tag;
+                       beiscsi_log(phba, KERN_WARNING,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+                                   BEISCSI_LOG_CONFIG,
+                                   "BC_%d : Insufficent Buffer Error "
+                                   "Resp_Len : %d Actual_Resp_Len : %d\n",
+                                   ioctl_resp_hdr->response_length,
+                                   ioctl_resp_hdr->actual_resp_len);
+
+                       rc = -EAGAIN;
+                       goto release_mcc_tag;
                }
-               rc = -EAGAIN;
+               rc = -EIO;
        }
 
 release_mcc_tag:
@@ -363,7 +377,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
        } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
                    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
                     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
-               phba->state = BE_ADAPTER_UP;
+               phba->state = BE_ADAPTER_LINK_UP;
 
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -486,33 +500,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
  **/
 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT    4000    /* 4sec */
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       uint32_t wait = 0;
+       unsigned long timeout;
+       bool read_flag = false;
+       int ret = 0, i;
        u32 ready;
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
 
-       do {
+       if (beiscsi_error(phba))
+               return -EIO;
 
-               if (beiscsi_error(phba))
-                       return -EIO;
+       timeout = jiffies + (HZ * 110);
 
-               ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
-               if (ready)
-                       break;
+       do {
+               for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+                       ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+                       if (ready) {
+                               read_flag = true;
+                               break;
+                       }
+                       mdelay(1);
+               }
 
-               if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
-                       beiscsi_log(phba, KERN_ERR,
-                                   BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-                                   "BC_%d : FW Timed Out\n");
+               if (!read_flag) {
+                       wait_event_timeout(rdybit_check_q,
+                                         (read_flag != true),
+                                          HZ * 5);
+               }
+       } while ((time_before(jiffies, timeout)) && !read_flag);
+
+       if (!read_flag) {
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+                           "BC_%d : FW Timed Out\n");
                        phba->fw_timeout = true;
                        beiscsi_ue_detect(phba);
-                       return -EBUSY;
-               }
+                       ret = -EBUSY;
+       }
 
-               mdelay(1);
-               wait++;
-       } while (true);
-       return 0;
+       return ret;
 }
 
 /*
@@ -699,7 +727,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        struct be_mcc_wrb *wrb;
 
-       BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+       WARN_ON(atomic_read(&mccq->used) >= mccq->len);
        wrb = queue_head_node(mccq);
        memset(wrb, 0, sizeof(*wrb));
        wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@@ -1009,10 +1037,29 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
        return status;
 }
 
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @length: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: Bind to which ULP
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
+ * on this queue by the FW
+ *
+ * return
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ *
+ **/
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
-                                   int entry_size)
+                                   int entry_size, uint8_t is_header,
+                                   uint8_t ulp_num)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
@@ -1030,6 +1077,11 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+       if (phba->fw_config.dual_ulp_aware) {
+               req->ulp_num = ulp_num;
+               req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+               req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+       }
 
        if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_be_default_pdu_context,
@@ -1067,22 +1119,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 
        status = be_mbox_notify(ctrl);
        if (!status) {
+               struct be_ring *defq_ring;
                struct be_defq_create_resp *resp = embedded_payload(wrb);
 
                dq->id = le16_to_cpu(resp->id);
                dq->created = true;
+               if (is_header)
+                       defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+               else
+                       defq_ring = &phba->phwi_ctrlr->
+                                   default_pdu_data[ulp_num];
+
+               defq_ring->id = dq->id;
+
+               if (!phba->fw_config.dual_ulp_aware) {
+                       defq_ring->ulp_num = BEISCSI_ULP0;
+                       defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+               } else {
+                       defq_ring->ulp_num = resp->ulp_num;
+                       defq_ring->doorbell_offset = resp->doorbell_offset;
+               }
        }
        spin_unlock(&ctrl->mbox_lock);
 
        return status;
 }
 
-int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-                      struct be_queue_info *wrbq)
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+                       struct be_dma_mem *q_mem,
+                       struct be_queue_info *wrbq,
+                       struct hwi_wrb_context *pwrb_context,
+                       uint8_t ulp_num)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_wrbq_create_req *req = embedded_payload(wrb);
        struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        int status;
 
        spin_lock(&ctrl->mbox_lock);
@@ -1093,17 +1176,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+       if (phba->fw_config.dual_ulp_aware) {
+               req->ulp_num = ulp_num;
+               req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+               req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+       }
+
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 
        status = be_mbox_notify(ctrl);
        if (!status) {
                wrbq->id = le16_to_cpu(resp->cid);
                wrbq->created = true;
+
+               pwrb_context->cid = wrbq->id;
+               if (!phba->fw_config.dual_ulp_aware) {
+                       pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+                       pwrb_context->ulp_num = BEISCSI_ULP0;
+               } else {
+                       pwrb_context->ulp_num = resp->ulp_num;
+                       pwrb_context->doorbell_offset = resp->doorbell_offset;
+               }
        }
        spin_unlock(&ctrl->mbox_lock);
        return status;
 }
 
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+                                   struct be_dma_mem *q_mem)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct be_post_template_pages_req *req = embedded_payload(wrb);
+       int status;
+
+       spin_lock(&ctrl->mbox_lock);
+
+       memset(wrb, 0, sizeof(*wrb));
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
+                          sizeof(*req));
+
+       req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+       req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+       be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+       status = be_mbox_notify(ctrl);
+       spin_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct be_remove_template_pages_req *req = embedded_payload(wrb);
+       int status;
+
+       spin_lock(&ctrl->mbox_lock);
+
+       memset(wrb, 0, sizeof(*wrb));
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                          OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+                          sizeof(*req));
+
+       req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+       status = be_mbox_notify(ctrl);
+       spin_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem,
                                u32 page_offset, u32 num_pages)
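The be_mbox_db_ready_wait() rework earlier in this file bounds the ready-bit poll with a jiffies deadline rather than a bare iteration counter. A rough sketch of that pattern, assuming a memory-mapped doorbell register; poll_ready_bit(), the 2 s budget and the msleep() back-off are illustrative choices (the patch itself sleeps on a dummy wait queue):

#include <linux/types.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int poll_ready_bit(void __iomem *db, u32 ready_mask)
{
	unsigned long deadline = jiffies + 2 * HZ;	/* overall time budget */

	do {
		int i;

		/* Poll in short 1 ms steps first... */
		for (i = 0; i < 4; i++) {
			if (ioread32(db) & ready_mask)
				return 0;		/* firmware is ready */
			mdelay(1);
		}
		/* ...then back off before the next burst of polls. */
		msleep(10);
	} while (time_before(jiffies, deadline));

	return -EBUSY;		/* firmware never set the ready bit */
}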
index 99073086dfe06522621e57b4e8a8e2d968f7c1c7..627ebbe0172c89d6f1fdf501c09cc7f7f854a8f8 100644 (file)
@@ -40,6 +40,7 @@ struct be_mcc_wrb {
        u32 tag1;               /* dword 3 */
        u32 rsvd;               /* dword 4 */
        union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE  220
                u8 embedded_payload[236];       /* used by embedded cmds */
                struct be_sge sgl[19];  /* used by non-embedded cmds */
        } payload;
@@ -162,6 +163,8 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_CQ_CREATE                                12
 #define OPCODE_COMMON_EQ_CREATE                                13
 #define OPCODE_COMMON_MCC_CREATE                       21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS      24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS   25
 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES              32
 #define OPCODE_COMMON_GET_FW_VERSION                   35
 #define OPCODE_COMMON_MODIFY_EQ_DELAY                  41
@@ -217,6 +220,10 @@ struct phys_addr {
        u32 hi;
 };
 
+struct virt_addr {
+       u32 lo;
+       u32 hi;
+};
 /**************************
  * BE Command definitions *
  **************************/
@@ -722,7 +729,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
-                                   int entry_size);
+                                   int entry_size, uint8_t is_header,
+                                   uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+                                   struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
 
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem, u32 page_offset,
@@ -731,7 +744,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
 
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-                      struct be_queue_info *wrbq);
+                      struct be_queue_info *wrbq,
+                      struct hwi_wrb_context *pwrb_context,
+                      uint8_t ulp_num);
 
 bool is_link_state_evt(u32 trailer);
 
@@ -776,7 +791,9 @@ struct be_defq_create_req {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
        u8 ulp_num;
-       u8 rsvd0;
+#define BEISCSI_DUAL_ULP_AWARE_BIT     0       /* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT      1       /* Byte 3 - Bit 1 */
+       u8 dua_feature;
        struct be_default_pdu_context context;
        struct phys_addr pages[8];
 } __packed;
@@ -784,6 +801,27 @@ struct be_defq_create_req {
 struct be_defq_create_resp {
        struct be_cmd_req_hdr hdr;
        u16 id;
+       u8 rsvd0;
+       u8 ulp_num;
+       u32 doorbell_offset;
+       u16 register_set;
+       u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+       struct be_cmd_req_hdr hdr;
+       u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI        0x1
+       u16 type;
+       struct phys_addr scratch_pa;
+       struct virt_addr scratch_va;
+       struct virt_addr pages_va;
+       struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+       struct be_cmd_req_hdr hdr;
+       u16 type;
        u16 rsvd0;
 } __packed;
 
@@ -800,14 +838,18 @@ struct be_wrbq_create_req {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
        u8 ulp_num;
-       u8 rsvd0;
+       u8 dua_feature;
        struct phys_addr pages[8];
 } __packed;
 
 struct be_wrbq_create_resp {
        struct be_cmd_resp_hdr resp_hdr;
        u16 cid;
-       u16 rsvd0;
+       u8 rsvd0;
+       u8 ulp_num;
+       u32 doorbell_offset;
+       u16 register_set;
+       u16 doorbell_format;
 } __packed;
 
 #define SOL_CID_MASK           0x0000FFC0
@@ -1002,6 +1044,7 @@ union tcp_upload_params {
 } __packed;
 
 struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE     0x10
        u32 ulp_mode;
        u32 etx_base;
        u32 etx_count;
@@ -1017,14 +1060,26 @@ struct be_ulp_fw_cfg {
        u32 icd_count;
 };
 
+struct be_ulp_chain_icd {
+       u32 chain_base;
+       u32 chain_count;
+};
+
 struct be_fw_cfg {
        struct be_cmd_req_hdr hdr;
        u32 be_config_number;
        u32 asic_revision;
        u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE    0x10
+#define BEISCSI_FUNC_DUA_MODE  0x800
        u32 function_mode;
        struct be_ulp_fw_cfg ulp[2];
        u32 function_caps;
+       u32 cqid_base;
+       u32 cqid_count;
+       u32 eqid_base;
+       u32 eqid_count;
+       struct be_ulp_chain_icd chain_icd[2];
 } __packed;
 
 struct be_cmd_get_all_if_id_req {
index ef36be003f67a49f0d37cf1ed1136c87894bd942..ffadbee0b4d9a70b782cca63bc53084118a0a80d 100644 (file)
@@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
        }
        beiscsi_ep = ep->dd_data;
        phba = beiscsi_ep->phba;
-       shost = phba->shost;
 
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                   "BS_%d : In beiscsi_session_create\n");
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : PCI_ERROR Recovery\n");
+               return NULL;
+       } else {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In beiscsi_session_create\n");
+       }
 
        if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
                cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
        }
 
+       shost = phba->shost;
        cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
                                          shost, cmds_max,
                                          sizeof(*beiscsi_sess),
@@ -194,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
        struct beiscsi_conn *beiscsi_conn = conn->dd_data;
        struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
+       struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+       struct hwi_wrb_context *pwrb_context;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
 
@@ -214,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
                return -EEXIST;
        }
 
+       pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+                                               beiscsi_ep->ep_cid)];
+
        beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
        beiscsi_conn->ep = beiscsi_ep;
        beiscsi_ep->conn = beiscsi_conn;
+       beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
 
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                    "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@@ -265,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
 
 void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
 {
-       struct be_cmd_get_if_info_resp if_info;
+       struct be_cmd_get_if_info_resp *if_info;
 
-       if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+       if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
                beiscsi_create_ipv4_iface(phba);
+               kfree(if_info);
+       }
 
-       if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+       if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
                beiscsi_create_ipv6_iface(phba);
+               kfree(if_info);
+       }
 }
 
 void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@@ -467,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
        uint32_t rm_len = dt_len;
        int ret = 0 ;
 
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In PCI_ERROR Recovery\n");
+               return -EBUSY;
+       }
+
        nla_for_each_attr(attrib, data, dt_len, rm_len) {
                iface_param = nla_data(attrib);
 
@@ -512,59 +534,60 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
                struct iscsi_iface *iface, int param,
                char *buf)
 {
-       struct be_cmd_get_if_info_resp if_info;
+       struct be_cmd_get_if_info_resp *if_info;
        int len, ip_type = BE2_IPV4;
 
-       memset(&if_info, 0, sizeof(if_info));
-
        if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
                ip_type = BE2_IPV6;
 
        len = mgmt_get_if_info(phba, ip_type, &if_info);
-       if (len)
+       if (len) {
+               kfree(if_info);
                return len;
+       }
 
        switch (param) {
        case ISCSI_NET_PARAM_IPV4_ADDR:
-               len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr);
+               len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
                break;
        case ISCSI_NET_PARAM_IPV6_ADDR:
-               len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr);
+               len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
                break;
        case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-               if (!if_info.dhcp_state)
+               if (!if_info->dhcp_state)
                        len = sprintf(buf, "static\n");
                else
                        len = sprintf(buf, "dhcp\n");
                break;
        case ISCSI_NET_PARAM_IPV4_SUBNET:
-               len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
+               len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
                break;
        case ISCSI_NET_PARAM_VLAN_ENABLED:
                len = sprintf(buf, "%s\n",
-                            (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+                            (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
                             ? "Disabled\n" : "Enabled\n");
                break;
        case ISCSI_NET_PARAM_VLAN_ID:
-               if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+               if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
                        return -EINVAL;
                else
                        len = sprintf(buf, "%d\n",
-                                    (if_info.vlan_priority &
+                                    (if_info->vlan_priority &
                                     ISCSI_MAX_VLAN_ID));
                break;
        case ISCSI_NET_PARAM_VLAN_PRIORITY:
-               if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
+               if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
                        return -EINVAL;
                else
                        len = sprintf(buf, "%d\n",
-                                    ((if_info.vlan_priority >> 13) &
+                                    ((if_info->vlan_priority >> 13) &
                                     ISCSI_MAX_VLAN_PRIORITY));
                break;
        default:
                WARN_ON(1);
        }
 
+       kfree(if_info);
        return len;
 }
 
@@ -577,6 +600,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
        struct be_cmd_get_def_gateway_resp gateway;
        int len = -ENOSYS;
 
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In PCI_ERROR Recovery\n");
+               return -EBUSY;
+       }
+
        switch (param) {
        case ISCSI_NET_PARAM_IPV4_ADDR:
        case ISCSI_NET_PARAM_IPV4_SUBNET:
@@ -672,8 +701,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
                        session->max_burst = 262144;
                break;
        case ISCSI_PARAM_MAX_XMIT_DLENGTH:
-               if ((conn->max_xmit_dlength > 65536) ||
-                   (conn->max_xmit_dlength == 0))
+               if (conn->max_xmit_dlength > 65536)
                        conn->max_xmit_dlength = 65536;
        default:
                return 0;
@@ -727,7 +755,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        struct iscsi_cls_host *ihost = shost->shost_data;
 
-       ihost->port_state = (phba->state == BE_ADAPTER_UP) ?
+       ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
                ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
 }
 
@@ -795,9 +823,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
        int status = 0;
 
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                   "BS_%d : In beiscsi_get_host_param,"
-                   " param= %d\n", param);
+
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In PCI_ERROR Recovery\n");
+               return -EBUSY;
+       } else {
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In beiscsi_get_host_param,"
+                           " param = %d\n", param);
+       }
 
        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
@@ -840,7 +875,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
        struct be_cmd_get_nic_conf_resp resp;
        int rc;
 
-       if (strlen(phba->mac_address))
+       if (phba->mac_addr_set)
                return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 
        memset(&resp, 0, sizeof(resp));
@@ -848,6 +883,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
        if (rc)
                return rc;
 
+       phba->mac_addr_set = true;
        memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
        return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
 }
@@ -923,6 +959,10 @@ static void  beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
                      session->max_r2t);
        AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
                      (conn->exp_statsn - 1));
+       AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+                     max_recv_data_segment_length, params,
+                     conn->max_recv_dlength);
+
 }
 
 /**
@@ -935,10 +975,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
        struct beiscsi_conn *beiscsi_conn = conn->dd_data;
        struct beiscsi_endpoint *beiscsi_ep;
        struct beiscsi_offload_params params;
+       struct beiscsi_hba *phba;
 
-       beiscsi_log(beiscsi_conn->phba, KERN_INFO,
-                   BEISCSI_LOG_CONFIG,
-                   "BS_%d : In beiscsi_conn_start\n");
+       phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
+
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In PCI_ERROR Recovery\n");
+               return -EBUSY;
+       } else {
+               beiscsi_log(beiscsi_conn->phba, KERN_INFO,
+                           BEISCSI_LOG_CONFIG,
+                           "BS_%d : In beiscsi_conn_start\n");
+       }
 
        memset(&params, 0, sizeof(struct beiscsi_offload_params));
        beiscsi_ep = beiscsi_conn->ep;
@@ -960,15 +1009,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
  */
 static int beiscsi_get_cid(struct beiscsi_hba *phba)
 {
-       unsigned short cid = 0xFFFF;
-
-       if (!phba->avlbl_cids)
-               return cid;
-
-       cid = phba->cid_array[phba->cid_alloc++];
-       if (phba->cid_alloc == phba->params.cxns_per_ctrl)
-               phba->cid_alloc = 0;
-       phba->avlbl_cids--;
+       unsigned short cid = 0xFFFF, cid_from_ulp;
+       struct ulp_cid_info *cid_info = NULL;
+       uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+
+       /* Find the ULP which has more CID available */
+       cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
+                         BEISCSI_ULP0_AVLBL_CID(phba) : 0;
+       cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
+                         BEISCSI_ULP1_AVLBL_CID(phba) : 0;
+       cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
+                       BEISCSI_ULP0 : BEISCSI_ULP1;
+
+       if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
+               cid_info = phba->cid_array_info[cid_from_ulp];
+               if (!cid_info->avlbl_cids)
+                       return cid;
+
+               cid = cid_info->cid_array[cid_info->cid_alloc++];
+
+               if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
+                                          phba, cid_from_ulp))
+                       cid_info->cid_alloc = 0;
+
+               cid_info->avlbl_cids--;
+       }
        return cid;
 }
 
@@ -979,10 +1044,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
  */
 static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 {
-       phba->avlbl_cids++;
-       phba->cid_array[phba->cid_free++] = cid;
-       if (phba->cid_free == phba->params.cxns_per_ctrl)
-               phba->cid_free = 0;
+       uint16_t cid_post_ulp;
+       struct hwi_controller *phwi_ctrlr;
+       struct hwi_wrb_context *pwrb_context;
+       struct ulp_cid_info *cid_info = NULL;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+       cid_post_ulp = pwrb_context->ulp_num;
+
+       cid_info = phba->cid_array_info[cid_post_ulp];
+       cid_info->avlbl_cids++;
+
+       cid_info->cid_array[cid_info->cid_free++] = cid;
+       if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
+               cid_info->cid_free = 0;
 }
 
 /**
@@ -1135,7 +1212,12 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                return ERR_PTR(ret);
        }
 
-       if (phba->state != BE_ADAPTER_UP) {
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               ret = -EBUSY;
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : In PCI_ERROR Recovery\n");
+               return ERR_PTR(ret);
+       } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
                ret = -EBUSY;
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                            "BS_%d : The Adapter Port state is Down!!!\n");
@@ -1260,6 +1342,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
                tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
        }
 
+       if (phba->state & BE_ADAPTER_PCI_ERR) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+                           "BS_%d : PCI_ERROR Recovery\n");
+               goto free_ep;
+       }
+
        tag = mgmt_invalidate_connection(phba, beiscsi_ep,
                                          beiscsi_ep->ep_cid,
                                          mgmt_invalidate_flag,
@@ -1272,6 +1360,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 
        beiscsi_mccq_compl(phba, tag, NULL, NULL);
        beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
+free_ep:
        beiscsi_free_ep(beiscsi_ep);
        beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
        iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
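The mgmt_get_if_info() callers above switch from a stack-allocated response to a buffer the callee allocates and the caller must kfree(). A small sketch of that ownership pattern under assumed names (get_if_info() and struct my_if_info are stand-ins, not the driver's API):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/errno.h>

struct my_if_info {
	u32 dhcp_state;
};

/* Callee: allocate the response buffer and hand ownership to the caller. */
static int get_if_info(struct my_if_info **out)
{
	struct my_if_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	info->dhcp_state = 1;		/* stand-in for the real firmware query */
	*out = info;
	return 0;
}

/* Caller: consume the buffer, then kfree() it on every exit path. */
static void show_if_info(void)
{
	struct my_if_info *info = NULL;

	if (get_if_info(&info)) {
		kfree(info);	/* kfree(NULL) is a no-op, so freeing on the
				 * error path is safe, as the hunks above do */
		return;
	}

	pr_info("bootproto: %s\n", info->dhcp_state ? "dhcp" : "static");
	kfree(info);
}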
index a1f5ac7a9806dffb6c7905f15771283ae2766c33..1f375051483a428d24c63c206fb24a40cda06211 100644 (file)
@@ -149,18 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
                "\t\t\t\tMiscellaneous Events   : 0x04\n"
                "\t\t\t\tError Handling         : 0x08\n"
                "\t\t\t\tIO Path Events         : 0x10\n"
-               "\t\t\t\tConfiguration Path     : 0x20\n");
+               "\t\t\t\tConfiguration Path     : 0x20\n"
+               "\t\t\t\tiSCSI Protocol         : 0x40\n");
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
-DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
+DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
+DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
+            beiscsi_active_session_disp, NULL);
+DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
+            beiscsi_free_session_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_log_enable,
        &dev_attr_beiscsi_drvr_ver,
        &dev_attr_beiscsi_adapter_family,
        &dev_attr_beiscsi_fw_ver,
-       &dev_attr_beiscsi_active_cid_count,
+       &dev_attr_beiscsi_active_session_count,
+       &dev_attr_beiscsi_free_session_count,
+       &dev_attr_beiscsi_phys_port,
        NULL,
 };
 
@@ -239,6 +246,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
                return SUCCESS;
        }
        spin_unlock_bh(&session->lock);
+       /* Invalidate WRB Posted for this Task */
+       AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+                     aborted_io_task->pwrb_handle->pwrb,
+                     1);
+
        conn = aborted_task->conn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;
@@ -316,6 +328,11 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
                if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
                        continue;
 
+               /* Invalidate WRB Posted for this Task */
+               AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+                             abrt_io_task->pwrb_handle->pwrb,
+                             1);
+
                inv_tbl->cid = cid;
                inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
                num_invalidate++;
@@ -699,30 +716,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
        return status;
 }
 
+/**
+ * beiscsi_get_params()- Set the config parameters
+ * @phba: ptr  device priv structure
+ **/
 static void beiscsi_get_params(struct beiscsi_hba *phba)
 {
-       phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
-                                   - (phba->fw_config.iscsi_cid_count
-                                   + BE2_TMFS
-                                   + BE2_NOPOUT_REQ));
-       phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
-       phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
+       uint32_t total_cid_count = 0;
+       uint32_t total_icd_count = 0;
+       uint8_t ulp_num = 0;
+
+       total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+                         BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               uint32_t align_mask = 0;
+               uint32_t icd_post_per_page = 0;
+               uint32_t icd_count_unavailable = 0;
+               uint32_t icd_start = 0, icd_count = 0;
+               uint32_t icd_start_align = 0, icd_count_align = 0;
+
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+                       icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+                       icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+
+                       /* Get ICD count that can be posted on each page */
+                       icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
+                                            sizeof(struct iscsi_sge)));
+                       align_mask = (icd_post_per_page - 1);
+
+                       /* Check if icd_start is aligned ICD per page posting */
+                       if (icd_start % icd_post_per_page) {
+                               icd_start_align = ((icd_start +
+                                                   icd_post_per_page) &
+                                                   ~(align_mask));
+                               phba->fw_config.
+                                       iscsi_icd_start[ulp_num] =
+                                       icd_start_align;
+                       }
+
+                       icd_count_align = (icd_count & ~align_mask);
+
+                       /* ICD discarded in the process of alignment */
+                       if (icd_start_align)
+                               icd_count_unavailable = ((icd_start_align -
+                                                         icd_start) +
+                                                        (icd_count -
+                                                         icd_count_align));
+
+                       /* Updated ICD count available */
+                       phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
+                                       icd_count_unavailable);
+
+                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                       "BM_%d : Aligned ICD values\n"
+                                       "\t ICD Start : %d\n"
+                                       "\t ICD Count : %d\n"
+                                       "\t ICD Discarded : %d\n",
+                                       phba->fw_config.
+                                       iscsi_icd_start[ulp_num],
+                                       phba->fw_config.
+                                       iscsi_icd_count[ulp_num],
+                                       icd_count_unavailable);
+                       break;
+               }
+       }
+
+       total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+       phba->params.ios_per_ctrl = (total_icd_count -
+                                   (total_cid_count +
+                                    BE2_TMFS + BE2_NOPOUT_REQ));
+       phba->params.cxns_per_ctrl = total_cid_count;
+       phba->params.asyncpdus_per_ctrl = total_cid_count;
+       phba->params.icds_per_ctrl = total_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
        phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
        phba->params.eq_timer = 64;
-       phba->params.num_eq_entries =
-           (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
-                                   + BE2_TMFS) / 512) + 1) * 512;
-       phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
-                               ? 1024 : phba->params.num_eq_entries;
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BM_%d : phba->params.num_eq_entries=%d\n",
-                   phba->params.num_eq_entries);
-       phba->params.num_cq_entries =
-           (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
-                                   + BE2_TMFS) / 512) + 1) * 512;
+       phba->params.num_eq_entries = 1024;
+       phba->params.num_cq_entries = 1024;
        phba->params.wrbs_per_cxn = 256;
 }
 
@@ -1613,8 +1685,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 
        WARN_ON(!pasync_handle);
 
-       pasync_handle->cri =
-                       BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+       pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = dpl;
        *pcq_index = index;
@@ -1674,18 +1746,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba,
 }
 
 static void hwi_free_async_msg(struct beiscsi_hba *phba,
-                                      unsigned int cri)
+                              struct hwi_async_pdu_context *pasync_ctx,
+                              unsigned int cri)
 {
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle, *tmp_handle;
        struct list_head *plist;
 
-       phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
-
        plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
-
        list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
                list_del(&pasync_handle->link);
 
@@ -1720,7 +1787,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
 }
 
 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
-                                  unsigned int is_header)
+                                   unsigned int is_header, uint8_t ulp_num)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hwi_async_pdu_context *pasync_ctx;
@@ -1728,13 +1795,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
        struct list_head *pfree_link, *pbusy_list;
        struct phys_addr *pasync_sge;
        unsigned int ring_id, num_entries;
-       unsigned int host_write_num;
+       unsigned int host_write_num, doorbell_offset;
        unsigned int writables;
        unsigned int i = 0;
        u32 doorbell = 0;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
        num_entries = pasync_ctx->num_entries;
 
        if (is_header) {
@@ -1742,13 +1809,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
                                pasync_ctx->async_header.free_entries);
                pfree_link = pasync_ctx->async_header.free_list.next;
                host_write_num = pasync_ctx->async_header.host_write_ptr;
-               ring_id = phwi_ctrlr->default_pdu_hdr.id;
+               ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
+               doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
+                                 doorbell_offset;
        } else {
                writables = min(pasync_ctx->async_data.writables,
                                pasync_ctx->async_data.free_entries);
                pfree_link = pasync_ctx->async_data.free_list.next;
                host_write_num = pasync_ctx->async_data.host_write_ptr;
-               ring_id = phwi_ctrlr->default_pdu_data.id;
+               ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
+               doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
+                                 doorbell_offset;
        }
 
        writables = (writables / 8) * 8;
@@ -1796,7 +1867,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,
                doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
                                        << DB_DEF_PDU_CQPROC_SHIFT;
 
-               iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
+               iowrite32(doorbell, phba->db_va + doorbell_offset);
        }
 }
 
@@ -1808,9 +1879,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle = NULL;
        unsigned int cq_index = -1;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+                    BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+                    cri_index));
 
        pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
                                             pdpdu_cqe, &cq_index);
@@ -1819,8 +1894,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
                hwi_update_async_writables(phba, pasync_ctx,
                                           pasync_handle->is_header, cq_index);
 
-       hwi_free_async_msg(phba, pasync_handle->cri);
-       hwi_post_async_buffers(phba, pasync_handle->is_header);
+       hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
+       hwi_post_async_buffers(phba, pasync_handle->is_header,
+                              BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+                              cri_index));
 }
 
 static unsigned int
@@ -1859,7 +1936,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
                                            phdr, hdr_len, pfirst_buffer,
                                            offset);
 
-       hwi_free_async_msg(phba, cri);
+       hwi_free_async_msg(phba, pasync_ctx, cri);
        return 0;
 }
 
@@ -1875,13 +1952,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
        struct pdu_base *ppdu;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+                    BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+                    BE_GET_CRI_FROM_CID(beiscsi_conn->
+                                beiscsi_conn_cid)));
 
        list_del(&pasync_handle->link);
        if (pasync_handle->is_header) {
                pasync_ctx->async_header.busy_entries--;
                if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-                       hwi_free_async_msg(phba, cri);
+                       hwi_free_async_msg(phba, pasync_ctx, cri);
                        BUG();
                }
 
@@ -1936,9 +2016,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
        struct hwi_async_pdu_context *pasync_ctx;
        struct async_pdu_handle *pasync_handle = NULL;
        unsigned int cq_index = -1;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
+       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
+                    BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
+                    cri_index));
+
        pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
                                             pdpdu_cqe, &cq_index);
 
@@ -1947,7 +2032,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
                                           pasync_handle->is_header, cq_index);
 
        hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
-       hwi_post_async_buffers(phba, pasync_handle->is_header);
+       hwi_post_async_buffers(phba, pasync_handle->is_header,
+                              BEISCSI_GET_ULP_FROM_CRI(
+                              phwi_ctrlr, cri_index));
 }
 
 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
@@ -2072,8 +2159,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                                    "BM_%d : Received %s[%d] on CID : %d\n",
                                    cqe_desc[code], code, cid);
 
+                       spin_lock_bh(&phba->async_pdu_lock);
                        hwi_process_default_pdu_ring(beiscsi_conn, phba,
                                             (struct i_t_dpdu_cqe *)sol);
+                       spin_unlock_bh(&phba->async_pdu_lock);
                        break;
                case UNSOL_DATA_NOTIFY:
                        beiscsi_log(phba, KERN_INFO,
@@ -2081,8 +2170,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                                    "BM_%d : Received %s[%d] on CID : %d\n",
                                    cqe_desc[code], code, cid);
 
+                       spin_lock_bh(&phba->async_pdu_lock);
                        hwi_process_default_pdu_ring(beiscsi_conn, phba,
                                             (struct i_t_dpdu_cqe *)sol);
+                       spin_unlock_bh(&phba->async_pdu_lock);
                        break;
                case CXN_INVALIDATE_INDEX_NOTIFY:
                case CMD_INVALIDATED_NOTIFY:
@@ -2110,8 +2201,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
                                    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
                                    cqe_desc[code], code, cid);
+                       spin_lock_bh(&phba->async_pdu_lock);
                        hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
                                             (struct i_t_dpdu_cqe *) sol);
+                       spin_unlock_bh(&phba->async_pdu_lock);
                        break;
                case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
                case CXN_KILLED_BURST_LEN_MISMATCH:
@@ -2476,26 +2569,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
        AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
 }
 
+/**
+ * beiscsi_find_mem_req()- Find mem needed
+ * @phba: ptr to HBA struct
+ **/
 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 {
+       uint8_t mem_descr_index, ulp_num;
        unsigned int num_cq_pages, num_async_pdu_buf_pages;
        unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
        unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
 
        num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
                                      sizeof(struct sol_cqe));
-       num_async_pdu_buf_pages =
-                       PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-                                      phba->params.defpdu_hdr_sz);
-       num_async_pdu_buf_sgl_pages =
-                       PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-                                      sizeof(struct phys_addr));
-       num_async_pdu_data_pages =
-                       PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-                                      phba->params.defpdu_data_sz);
-       num_async_pdu_data_sgl_pages =
-                       PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
-                                      sizeof(struct phys_addr));
 
        phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
 
@@ -2517,24 +2603,79 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
                phba->params.icds_per_ctrl;
        phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
                phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
-
-       phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
-               num_async_pdu_buf_pages * PAGE_SIZE;
-       phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
-               num_async_pdu_data_pages * PAGE_SIZE;
-       phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
-               num_async_pdu_buf_sgl_pages * PAGE_SIZE;
-       phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
-               num_async_pdu_data_sgl_pages * PAGE_SIZE;
-       phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
-               phba->params.asyncpdus_per_ctrl *
-               sizeof(struct async_pdu_handle);
-       phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
-               phba->params.asyncpdus_per_ctrl *
-               sizeof(struct async_pdu_handle);
-       phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
-               sizeof(struct hwi_async_pdu_context) +
-               (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+                       num_async_pdu_buf_sgl_pages =
+                               PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+                                              phba, ulp_num) *
+                                              sizeof(struct phys_addr));
+
+                       num_async_pdu_buf_pages =
+                               PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+                                              phba, ulp_num) *
+                                              phba->params.defpdu_hdr_sz);
+
+                       num_async_pdu_data_pages =
+                               PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+                                              phba, ulp_num) *
+                                              phba->params.defpdu_data_sz);
+
+                       num_async_pdu_data_sgl_pages =
+                               PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+                                              phba, ulp_num) *
+                                              sizeof(struct phys_addr));
+
+                       mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                       BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+                                       BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
+
+                       mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         num_async_pdu_buf_pages *
+                                         PAGE_SIZE;
+
+                       mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         num_async_pdu_data_pages *
+                                         PAGE_SIZE;
+
+                       mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         num_async_pdu_buf_sgl_pages *
+                                         PAGE_SIZE;
+
+                       mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         num_async_pdu_data_sgl_pages *
+                                         PAGE_SIZE;
+
+                       mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+                                         sizeof(struct async_pdu_handle);
+
+                       mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+                                         sizeof(struct async_pdu_handle);
+
+                       mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+                                         (ulp_num * MEM_DESCR_OFFSET));
+                       phba->mem_req[mem_descr_index] =
+                                         sizeof(struct hwi_async_pdu_context) +
+                                        (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+                                         sizeof(struct hwi_async_entry));
+               }
+       }
 }
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
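
beiscsi_find_mem_req() above now sizes one set of async PDU regions per supported ULP instead of a single set per controller. Each region is addressed by its ULP0 base index plus a fixed per-ULP stride. A minimal sketch of that indexing, assuming MEM_DESCR_OFFSET is the stride used at the call sites above (the helper name is illustrative):

        /* Sketch: pick the mem_req/init_mem slot for a region and ULP. */
        static inline unsigned int ulp_mem_index(unsigned int base_ulp0,
                                                 uint8_t ulp_num)
        {
                return base_ulp0 + (ulp_num * MEM_DESCR_OFFSET);
        }

        /* e.g. ulp_mem_index(HWI_MEM_ASYNC_HEADER_BUF_ULP0, 1) addresses the
         * default PDU header buffer region reserved for ULP 1.
         */
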
@@ -2576,6 +2717,12 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 
        mem_descr = phba->init_mem;
        for (i = 0; i < SE_MEM_MAX; i++) {
+               if (!phba->mem_req[i]) {
+                       mem_descr->mem_array = NULL;
+                       mem_descr++;
+                       continue;
+               }
+
                j = 0;
                mem_arr = mem_arr_orig;
                alloc_size = phba->mem_req[i];
@@ -2697,7 +2844,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
        /* Allocate memory for WRBQ */
        phwi_ctxt = phwi_ctrlr->phwi_ctxt;
        phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
-                                    phba->fw_config.iscsi_cid_count,
+                                    phba->params.cxns_per_ctrl,
                                     GFP_KERNEL);
        if (!phwi_ctxt->be_wrbq) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -2779,6 +2926,7 @@ init_wrb_hndl_failed:
 
 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
+       uint8_t ulp_num;
        struct hwi_controller *phwi_ctrlr;
        struct hba_parameters *p = &phba->params;
        struct hwi_async_pdu_context *pasync_ctx;
@@ -2786,155 +2934,150 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        unsigned int index, idx, num_per_mem, num_async_data;
        struct be_mem_descriptor *mem_descr;
 
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
-
-       phwi_ctrlr = phba->phwi_ctrlr;
-       phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
+                                    (ulp_num * MEM_DESCR_OFFSET));
+
+                       phwi_ctrlr = phba->phwi_ctrlr;
+                       phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
+                               (struct hwi_async_pdu_context *)
+                                mem_descr->mem_array[0].virtual_address;
+
+                       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+                       memset(pasync_ctx, 0, sizeof(*pasync_ctx));
+
+                       pasync_ctx->async_entry =
+                                       (struct hwi_async_entry *)
+                                       ((long unsigned int)pasync_ctx +
+                                       sizeof(struct hwi_async_pdu_context));
+
+                       pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+                                                 ulp_num);
+                       pasync_ctx->buffer_size = p->defpdu_hdr_sz;
+
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
+                               (ulp_num * MEM_DESCR_OFFSET);
+                       if (mem_descr->mem_array[0].virtual_address) {
+                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                           "BM_%d : hwi_init_async_pdu_ctx"
+                                           " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
+                                           ulp_num,
+                                           mem_descr->mem_array[0].
+                                           virtual_address);
+                       } else
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : No Virtual address for ULP : %d\n",
+                                           ulp_num);
+
+                       pasync_ctx->async_header.va_base =
                                mem_descr->mem_array[0].virtual_address;
-       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
-       memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
-       pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
-                                         phba->fw_config.iscsi_cid_count,
-                                         GFP_KERNEL);
-       if (!pasync_ctx->async_entry) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
-               return -ENOMEM;
-       }
-
-       pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
-       pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
-       if (mem_descr->mem_array[0].virtual_address) {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BM_%d : hwi_init_async_pdu_ctx"
-                           " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
-                           mem_descr->mem_array[0].virtual_address);
-       } else
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : No Virtual address\n");
-
-       pasync_ctx->async_header.va_base =
-                       mem_descr->mem_array[0].virtual_address;
-
-       pasync_ctx->async_header.pa_base.u.a64.address =
-                       mem_descr->mem_array[0].bus_address.u.a64.address;
-
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_HEADER_RING;
-       if (mem_descr->mem_array[0].virtual_address) {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BM_%d : hwi_init_async_pdu_ctx"
-                           " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
-                           mem_descr->mem_array[0].virtual_address);
-       } else
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : No Virtual address\n");
-
-       pasync_ctx->async_header.ring_base =
-                       mem_descr->mem_array[0].virtual_address;
-
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
-       if (mem_descr->mem_array[0].virtual_address) {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BM_%d : hwi_init_async_pdu_ctx"
-                           " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
-                           mem_descr->mem_array[0].virtual_address);
-       } else
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : No Virtual address\n");
-
-       pasync_ctx->async_header.handle_base =
-                       mem_descr->mem_array[0].virtual_address;
-       pasync_ctx->async_header.writables = 0;
-       INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
-
-
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_DATA_RING;
-       if (mem_descr->mem_array[0].virtual_address) {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BM_%d : hwi_init_async_pdu_ctx"
-                           " HWI_MEM_ASYNC_DATA_RING va=%p\n",
-                           mem_descr->mem_array[0].virtual_address);
-       } else
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : No Virtual address\n");
-
-       pasync_ctx->async_data.ring_base =
-                       mem_descr->mem_array[0].virtual_address;
-
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
-       if (!mem_descr->mem_array[0].virtual_address)
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : No Virtual address\n");
+                       pasync_ctx->async_header.pa_base.u.a64.address =
+                               mem_descr->mem_array[0].
+                               bus_address.u.a64.address;
 
-       pasync_ctx->async_data.handle_base =
-                       mem_descr->mem_array[0].virtual_address;
-       pasync_ctx->async_data.writables = 0;
-       INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+                                    (ulp_num * MEM_DESCR_OFFSET);
+                       if (mem_descr->mem_array[0].virtual_address) {
+                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                           "BM_%d : hwi_init_async_pdu_ctx"
+                                           " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
+                                           ulp_num,
+                                           mem_descr->mem_array[0].
+                                           virtual_address);
+                       } else
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : No Virtual address for ULP : %d\n",
+                                           ulp_num);
+
+                       pasync_ctx->async_header.ring_base =
+                               mem_descr->mem_array[0].virtual_address;
 
-       pasync_header_h =
-               (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
-       pasync_data_h =
-               (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
+                                    (ulp_num * MEM_DESCR_OFFSET);
+                       if (mem_descr->mem_array[0].virtual_address) {
+                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                           "BM_%d : hwi_init_async_pdu_ctx"
+                                           " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
+                                           ulp_num,
+                                           mem_descr->mem_array[0].
+                                           virtual_address);
+                       } else
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : No Virtual address for ULP : %d\n",
+                                           ulp_num);
+
+                       pasync_ctx->async_header.handle_base =
+                               mem_descr->mem_array[0].virtual_address;
+                       pasync_ctx->async_header.writables = 0;
+                       INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
+
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+                                    (ulp_num * MEM_DESCR_OFFSET);
+                       if (mem_descr->mem_array[0].virtual_address) {
+                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                           "BM_%d : hwi_init_async_pdu_ctx"
+                                           " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
+                                           ulp_num,
+                                           mem_descr->mem_array[0].
+                                           virtual_address);
+                       } else
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : No Virtual address for ULP : %d\n",
+                                           ulp_num);
+
+                       pasync_ctx->async_data.ring_base =
+                               mem_descr->mem_array[0].virtual_address;
 
-       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_DATA_BUF;
-       if (mem_descr->mem_array[0].virtual_address) {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                           "BM_%d : hwi_init_async_pdu_ctx"
-                           " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
-                           mem_descr->mem_array[0].virtual_address);
-       } else
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : No Virtual address\n");
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
+                                    (ulp_num * MEM_DESCR_OFFSET);
+                       if (!mem_descr->mem_array[0].virtual_address)
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : No Virtual address for ULP : %d\n",
+                                           ulp_num);
 
-       idx = 0;
-       pasync_ctx->async_data.va_base =
-                       mem_descr->mem_array[idx].virtual_address;
-       pasync_ctx->async_data.pa_base.u.a64.address =
-                       mem_descr->mem_array[idx].bus_address.u.a64.address;
-
-       num_async_data = ((mem_descr->mem_array[idx].size) /
-                               phba->params.defpdu_data_sz);
-       num_per_mem = 0;
-
-       for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
-               pasync_header_h->cri = -1;
-               pasync_header_h->index = (char)index;
-               INIT_LIST_HEAD(&pasync_header_h->link);
-               pasync_header_h->pbuffer =
-                       (void *)((unsigned long)
-                       (pasync_ctx->async_header.va_base) +
-                       (p->defpdu_hdr_sz * index));
-
-               pasync_header_h->pa.u.a64.address =
-                       pasync_ctx->async_header.pa_base.u.a64.address +
-                       (p->defpdu_hdr_sz * index);
-
-               list_add_tail(&pasync_header_h->link,
-                               &pasync_ctx->async_header.free_list);
-               pasync_header_h++;
-               pasync_ctx->async_header.free_entries++;
-               pasync_ctx->async_header.writables++;
-
-               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
-               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-                              header_busy_list);
-               pasync_data_h->cri = -1;
-               pasync_data_h->index = (char)index;
-               INIT_LIST_HEAD(&pasync_data_h->link);
-
-               if (!num_async_data) {
-                       num_per_mem = 0;
-                       idx++;
+                       pasync_ctx->async_data.handle_base =
+                               mem_descr->mem_array[0].virtual_address;
+                       pasync_ctx->async_data.writables = 0;
+                       INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
+
+                       pasync_header_h =
+                               (struct async_pdu_handle *)
+                               pasync_ctx->async_header.handle_base;
+                       pasync_data_h =
+                               (struct async_pdu_handle *)
+                               pasync_ctx->async_data.handle_base;
+
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
+                                    (ulp_num * MEM_DESCR_OFFSET);
+                       if (mem_descr->mem_array[0].virtual_address) {
+                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                           "BM_%d : hwi_init_async_pdu_ctx"
+                                           " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
+                                           ulp_num,
+                                           mem_descr->mem_array[0].
+                                           virtual_address);
+                       } else
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : No Virtual address for ULP : %d\n",
+                                           ulp_num);
+
+                       idx = 0;
                        pasync_ctx->async_data.va_base =
                                mem_descr->mem_array[idx].virtual_address;
                        pasync_ctx->async_data.pa_base.u.a64.address =
@@ -2943,32 +3086,83 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
                        num_async_data = ((mem_descr->mem_array[idx].size) /
                                        phba->params.defpdu_data_sz);
-               }
-               pasync_data_h->pbuffer =
-                       (void *)((unsigned long)
-                       (pasync_ctx->async_data.va_base) +
-                       (p->defpdu_data_sz * num_per_mem));
-
-               pasync_data_h->pa.u.a64.address =
-                   pasync_ctx->async_data.pa_base.u.a64.address +
-                   (p->defpdu_data_sz * num_per_mem);
-               num_per_mem++;
-               num_async_data--;
+                       num_per_mem = 0;
 
-               list_add_tail(&pasync_data_h->link,
-                             &pasync_ctx->async_data.free_list);
-               pasync_data_h++;
-               pasync_ctx->async_data.free_entries++;
-               pasync_ctx->async_data.writables++;
+                       for (index = 0; index < BEISCSI_GET_CID_COUNT
+                                       (phba, ulp_num); index++) {
+                               pasync_header_h->cri = -1;
+                               pasync_header_h->index = (char)index;
+                               INIT_LIST_HEAD(&pasync_header_h->link);
+                               pasync_header_h->pbuffer =
+                                       (void *)((unsigned long)
+                                                (pasync_ctx->
+                                                 async_header.va_base) +
+                                                (p->defpdu_hdr_sz * index));
+
+                               pasync_header_h->pa.u.a64.address =
+                                       pasync_ctx->async_header.pa_base.u.a64.
+                                       address + (p->defpdu_hdr_sz * index);
+
+                               list_add_tail(&pasync_header_h->link,
+                                             &pasync_ctx->async_header.
+                                             free_list);
+                               pasync_header_h++;
+                               pasync_ctx->async_header.free_entries++;
+                               pasync_ctx->async_header.writables++;
+
+                               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+                                              wait_queue.list);
+                               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+                                              header_busy_list);
+                               pasync_data_h->cri = -1;
+                               pasync_data_h->index = (char)index;
+                               INIT_LIST_HEAD(&pasync_data_h->link);
+
+                               if (!num_async_data) {
+                                       num_per_mem = 0;
+                                       idx++;
+                                       pasync_ctx->async_data.va_base =
+                                               mem_descr->mem_array[idx].
+                                               virtual_address;
+                                       pasync_ctx->async_data.pa_base.u.
+                                               a64.address =
+                                               mem_descr->mem_array[idx].
+                                               bus_address.u.a64.address;
+                                       num_async_data =
+                                               ((mem_descr->mem_array[idx].
+                                                 size) /
+                                                phba->params.defpdu_data_sz);
+                               }
+                               pasync_data_h->pbuffer =
+                                       (void *)((unsigned long)
+                                       (pasync_ctx->async_data.va_base) +
+                                       (p->defpdu_data_sz * num_per_mem));
+
+                               pasync_data_h->pa.u.a64.address =
+                                       pasync_ctx->async_data.pa_base.u.a64.
+                                       address + (p->defpdu_data_sz *
+                                       num_per_mem);
+                               num_per_mem++;
+                               num_async_data--;
+
+                               list_add_tail(&pasync_data_h->link,
+                                             &pasync_ctx->async_data.
+                                             free_list);
+                               pasync_data_h++;
+                               pasync_ctx->async_data.free_entries++;
+                               pasync_ctx->async_data.writables++;
+
+                               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
+                                              data_busy_list);
+                       }
 
-               INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
+                       pasync_ctx->async_header.host_write_ptr = 0;
+                       pasync_ctx->async_header.ep_read_ptr = -1;
+                       pasync_ctx->async_data.host_write_ptr = 0;
+                       pasync_ctx->async_data.ep_read_ptr = -1;
+               }
        }
 
-       pasync_ctx->async_header.host_write_ptr = 0;
-       pasync_ctx->async_header.ep_read_ptr = -1;
-       pasync_ctx->async_data.host_write_ptr = 0;
-       pasync_ctx->async_data.ep_read_ptr = -1;
-
        return 0;
 }
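
In the reworked hwi_init_async_pdu_ctx() above, each ULP's context and its hwi_async_entry array share one reserved region (sized in beiscsi_find_mem_req() as the context struct plus one entry per CID), so the entry array is reached by pointer arithmetic instead of a separate kzalloc(). A short sketch of the assumed layout:

        /* Sketch: one contiguous per-ULP region, context header first,
         * then BEISCSI_GET_CID_COUNT(phba, ulp_num) async entries.
         */
        struct hwi_async_pdu_context *ctx = mem_descr->mem_array[0].virtual_address;

        ctx->async_entry = (struct hwi_async_entry *)
                           ((unsigned long)ctx + sizeof(*ctx));
        ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, ulp_num);
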
 
@@ -3164,7 +3358,7 @@ static int
 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
                       struct hwi_context_memory *phwi_context,
                       struct hwi_controller *phwi_ctrlr,
-                      unsigned int def_pdu_ring_sz)
+                      unsigned int def_pdu_ring_sz, uint8_t ulp_num)
 {
        unsigned int idx;
        int ret;
@@ -3174,36 +3368,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
        void *dq_vaddress;
 
        idx = 0;
-       dq = &phwi_context->be_def_hdrq;
+       dq = &phwi_context->be_def_hdrq[ulp_num];
        cq = &phwi_context->be_cq[0];
        mem = &dq->dma_mem;
        mem_descr = phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_HEADER_RING;
+       mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
+                   (ulp_num * MEM_DESCR_OFFSET);
        dq_vaddress = mem_descr->mem_array[idx].virtual_address;
        ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
                            sizeof(struct phys_addr),
                            sizeof(struct phys_addr), dq_vaddress);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
+                           "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
+                           ulp_num);
+
                return ret;
        }
        mem->dma = (unsigned long)mem_descr->mem_array[idx].
                                  bus_address.u.a64.address;
        ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
                                              def_pdu_ring_sz,
-                                             phba->params.defpdu_hdr_sz);
+                                             phba->params.defpdu_hdr_sz,
+                                             BEISCSI_DEFQ_HDR, ulp_num);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+                           "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
+                           ulp_num);
+
                return ret;
        }
-       phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BM_%d : iscsi def pdu id is %d\n",
-                   phwi_context->be_def_hdrq.id);
 
-       hwi_post_async_buffers(phba, 1);
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
+                   ulp_num,
+                   phwi_context->be_def_hdrq[ulp_num].id);
+       hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
        return 0;
 }
 
@@ -3211,7 +3411,7 @@ static int
 beiscsi_create_def_data(struct beiscsi_hba *phba,
                        struct hwi_context_memory *phwi_context,
                        struct hwi_controller *phwi_ctrlr,
-                       unsigned int def_pdu_ring_sz)
+                       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
 {
        unsigned int idx;
        int ret;
@@ -3221,43 +3421,86 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
        void *dq_vaddress;
 
        idx = 0;
-       dataq = &phwi_context->be_def_dataq;
+       dataq = &phwi_context->be_def_dataq[ulp_num];
        cq = &phwi_context->be_cq[0];
        mem = &dataq->dma_mem;
        mem_descr = phba->init_mem;
-       mem_descr += HWI_MEM_ASYNC_DATA_RING;
+       mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+                   (ulp_num * MEM_DESCR_OFFSET);
        dq_vaddress = mem_descr->mem_array[idx].virtual_address;
        ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
                            sizeof(struct phys_addr),
                            sizeof(struct phys_addr), dq_vaddress);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
+                           "BM_%d : be_fill_queue Failed for DEF PDU "
+                           "DATA on ULP : %d\n",
+                           ulp_num);
+
                return ret;
        }
        mem->dma = (unsigned long)mem_descr->mem_array[idx].
                                  bus_address.u.a64.address;
        ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
                                              def_pdu_ring_sz,
-                                             phba->params.defpdu_data_sz);
+                                             phba->params.defpdu_data_sz,
+                                             BEISCSI_DEFQ_DATA, ulp_num);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d be_cmd_create_default_pdu_queue"
-                           " Failed for DEF PDU DATA\n");
+                           " Failed for DEF PDU DATA on ULP : %d\n",
+                           ulp_num);
                return ret;
        }
-       phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
+
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BM_%d : iscsi def data id is %d\n",
-                   phwi_context->be_def_dataq.id);
+                   "BM_%d : iscsi def data id on ULP : %d is  %d\n",
+                   ulp_num,
+                   phwi_context->be_def_dataq[ulp_num].id);
 
-       hwi_post_async_buffers(phba, 0);
+       hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BM_%d : DEFAULT PDU DATA RING CREATED\n");
+                   "BM_%d : DEFAULT PDU DATA RING CREATED "
+                   "on ULP : %d\n", ulp_num);
 
        return 0;
 }
 
+
+static int
+beiscsi_post_template_hdr(struct beiscsi_hba *phba)
+{
+       struct be_mem_descriptor *mem_descr;
+       struct mem_array *pm_arr;
+       struct be_dma_mem sgl;
+       int status, ulp_num;
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+                       mem_descr = (struct be_mem_descriptor *)phba->init_mem;
+                       mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
+                                   (ulp_num * MEM_DESCR_OFFSET);
+                       pm_arr = mem_descr->mem_array;
+
+                       hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
+                       status = be_cmd_iscsi_post_template_hdr(
+                                &phba->ctrl, &sgl);
+
+                       if (status != 0) {
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Post Template HDR Failed for "
+                                           "ULP_%d\n", ulp_num);
+                               return status;
+                       }
+
+                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                   "BM_%d : Template HDR Pages Posted for "
+                                   "ULP_%d\n", ulp_num);
+               }
+       }
+       return 0;
+}
+
 static int
 beiscsi_post_pages(struct beiscsi_hba *phba)
 {
@@ -3265,14 +3508,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
        struct mem_array *pm_arr;
        unsigned int page_offset, i;
        struct be_dma_mem sgl;
-       int status;
+       int status, ulp_num = 0;
 
        mem_descr = phba->init_mem;
        mem_descr += HWI_MEM_SGE;
        pm_arr = mem_descr->mem_array;
 
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+                       break;
+
        page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
-                       phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
+                       phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
        for (i = 0; i < mem_descr->num_elements; i++) {
                hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
                status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
@@ -3324,13 +3571,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
 {
        unsigned int wrb_mem_index, offset, size, num_wrb_rings;
        u64 pa_addr_lo;
-       unsigned int idx, num, i;
+       unsigned int idx, num, i, ulp_num;
        struct mem_array *pwrb_arr;
        void *wrb_vaddr;
        struct be_dma_mem sgl;
        struct be_mem_descriptor *mem_descr;
        struct hwi_wrb_context *pwrb_context;
        int status;
+       uint8_t ulp_count = 0, ulp_base_num = 0;
+       uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
 
        idx = 0;
        mem_descr = phba->init_mem;
@@ -3374,14 +3623,37 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                        num_wrb_rings--;
                }
        }
+
+       /* Get the ULP Count */
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+                       ulp_count++;
+                       ulp_base_num = ulp_num;
+                       cid_count_ulp[ulp_num] =
+                               BEISCSI_GET_CID_COUNT(phba, ulp_num);
+               }
+
        for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
                wrb_mem_index = 0;
                offset = 0;
                size = 0;
 
+               if (ulp_count > 1) {
+                       ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
+
+                       if (!cid_count_ulp[ulp_base_num])
+                               ulp_base_num = (ulp_base_num + 1) %
+                                               BEISCSI_ULP_COUNT;
+
+                       cid_count_ulp[ulp_base_num]--;
+               }
+
+
                hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
                status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
-                                           &phwi_context->be_wrbq[i]);
+                                           &phwi_context->be_wrbq[i],
+                                           &phwi_ctrlr->wrb_context[i],
+                                           ulp_base_num);
                if (status != 0) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                    "BM_%d : wrbq create failed.");
@@ -3389,7 +3661,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                        return status;
                }
                pwrb_context = &phwi_ctrlr->wrb_context[i];
-               pwrb_context->cid = phwi_context->be_wrbq[i].id;
                BE_SET_CID_TO_CRI(i, pwrb_context->cid);
        }
        kfree(pwrb_arr);
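
beiscsi_create_wrb_rings() above now tells the firmware which ULP each WRB queue belongs to. When more than one ULP is supported, connections are spread across them round-robin, skipping a ULP once its CID budget is exhausted. A minimal sketch of that policy (the cid_budget array and the create call are illustrative names, not the patch's):

        /* Sketch: alternate WRB queues between the supported ULPs. */
        for (i = 0; i < cxns_per_ctrl; i++) {
                ulp = (ulp + 1) % BEISCSI_ULP_COUNT;         /* try the next ULP      */
                if (!cid_budget[ulp])
                        ulp = (ulp + 1) % BEISCSI_ULP_COUNT; /* empty, use its peer   */
                cid_budget[ulp]--;
                create_wrbq_on_ulp(i, ulp);                  /* illustrative call     */
        }
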
@@ -3433,10 +3704,13 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        struct hwi_async_pdu_context *pasync_ctx;
-       int i, eq_num;
+       int i, eq_num, ulp_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
+
+       be_cmd_iscsi_remove_template_hdr(ctrl);
+
        for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
                q = &phwi_context->be_wrbq[i];
                if (q->created)
@@ -3445,13 +3719,20 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        kfree(phwi_context->be_wrbq);
        free_wrb_handles(phba);
 
-       q = &phwi_context->be_def_hdrq;
-       if (q->created)
-               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 
-       q = &phwi_context->be_def_dataq;
-       if (q->created)
-               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+                       q = &phwi_context->be_def_hdrq[ulp_num];
+                       if (q->created)
+                               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+                       q = &phwi_context->be_def_dataq[ulp_num];
+                       if (q->created)
+                               beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+                       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+               }
+       }
 
        beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
 
@@ -3470,9 +3751,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
        }
        be_mcc_queues_destroy(phba);
-
-       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
-       kfree(pasync_ctx->async_entry);
        be_cmd_fw_uninit(ctrl);
 }
 
@@ -3538,8 +3816,19 @@ static void find_num_cpus(struct beiscsi_hba *phba)
                                  BEISCSI_MAX_NUM_CPUS : num_cpus;
                break;
        case BE_GEN4:
-               phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
-                                 OC_SKH_MAX_NUM_CPUS : num_cpus;
+               /*
+                * If eqid_count == 1 fall back to
+                * INTX mechanism
+                **/
+               if (phba->fw_config.eqid_count == 1) {
+                       enable_msix = 0;
+                       phba->num_cpus = 1;
+                       return;
+               }
+
+               phba->num_cpus =
+                       (num_cpus > (phba->fw_config.eqid_count - 1)) ?
+                       (phba->fw_config.eqid_count - 1) : num_cpus;
                break;
        default:
                phba->num_cpus = 1;
@@ -3552,10 +3841,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
        struct hwi_context_memory *phwi_context;
        unsigned int def_pdu_ring_sz;
        struct be_ctrl_info *ctrl = &phba->ctrl;
-       int status;
+       int status, ulp_num;
 
-       def_pdu_ring_sz =
-               phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
        phwi_context->max_eqd = 0;
@@ -3588,27 +3875,48 @@ static int hwi_init_port(struct beiscsi_hba *phba)
                goto error;
        }
 
-       status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
-                                       def_pdu_ring_sz);
-       if (status != 0) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Default Header not created\n");
-               goto error;
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+                       def_pdu_ring_sz =
+                               BEISCSI_GET_CID_COUNT(phba, ulp_num) *
+                               sizeof(struct phys_addr);
+
+                       status = beiscsi_create_def_hdr(phba, phwi_context,
+                                                       phwi_ctrlr,
+                                                       def_pdu_ring_sz,
+                                                       ulp_num);
+                       if (status != 0) {
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Default Header not created for ULP : %d\n",
+                                           ulp_num);
+                               goto error;
+                       }
+
+                       status = beiscsi_create_def_data(phba, phwi_context,
+                                                        phwi_ctrlr,
+                                                        def_pdu_ring_sz,
+                                                        ulp_num);
+                       if (status != 0) {
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Default Data not created for ULP : %d\n",
+                                           ulp_num);
+                               goto error;
+                       }
+               }
        }
 
-       status = beiscsi_create_def_data(phba, phwi_context,
-                                        phwi_ctrlr, def_pdu_ring_sz);
+       status = beiscsi_post_pages(phba);
        if (status != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Default Data not created\n");
+                           "BM_%d : Post SGL Pages Failed\n");
                goto error;
        }
 
-       status = beiscsi_post_pages(phba);
+       status = beiscsi_post_template_hdr(phba);
        if (status != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Post SGL Pages Failed\n");
-               goto error;
+                           "BM_%d : Template HDR Posting for CXN Failed\n");
        }
 
        status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
@@ -3618,6 +3926,26 @@ static int hwi_init_port(struct beiscsi_hba *phba)
                goto error;
        }
 
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               uint16_t async_arr_idx = 0;
+
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+                       uint16_t cri = 0;
+                       struct hwi_async_pdu_context *pasync_ctx;
+
+                       pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
+                                    phwi_ctrlr, ulp_num);
+                       for (cri = 0; cri <
+                            phba->params.cxns_per_ctrl; cri++) {
+                               if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
+                                              (phwi_ctrlr, cri))
+                                       pasync_ctx->cid_to_async_cri_map[
+                                       phwi_ctrlr->wrb_context[cri].cid] =
+                                       async_arr_idx++;
+                       }
+               }
+       }
+
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                    "BM_%d : hwi_init_port success\n");
        return 0;
@@ -3682,6 +4010,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
                          (unsigned long)mem_descr->mem_array[j - 1].
                          bus_address.u.a64.address);
                }
+
                kfree(mem_descr->mem_array);
                mem_descr++;
        }
@@ -3721,6 +4050,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
        struct sgl_handle *psgl_handle;
        struct iscsi_sge *pfrag;
        unsigned int arr_index, i, idx;
+       unsigned int ulp_icd_start, ulp_num = 0;
 
        phba->io_sgl_hndl_avbl = 0;
        phba->eh_sgl_hndl_avbl = 0;
@@ -3787,6 +4117,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
                    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
                    mem_descr_sg->num_elements);
 
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+               if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+                       break;
+
+       ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+
        arr_index = 0;
        idx = 0;
        while (idx < mem_descr_sg->num_elements) {
@@ -3805,8 +4141,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
                        AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
                        AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
                        pfrag += phba->params.num_sge_per_io;
-                       psgl_handle->sgl_index =
-                               phba->fw_config.iscsi_icd_start + arr_index++;
+                       psgl_handle->sgl_index = ulp_icd_start + arr_index++;
                }
                idx++;
        }
@@ -3819,15 +4154,46 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-       int i;
+       int ret;
+       uint16_t i, ulp_num;
+       struct ulp_cid_info *ptr_cid_info = NULL;
 
-       phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
-                                 GFP_KERNEL);
-       if (!phba->cid_array) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Failed to allocate memory in "
-                           "hba_setup_cid_tbls\n");
-               return -ENOMEM;
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+                       ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
+                                              GFP_KERNEL);
+
+                       if (!ptr_cid_info) {
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Failed to allocate memory "
+                                           "for ULP_CID_INFO for ULP : %d\n",
+                                           ulp_num);
+                               ret = -ENOMEM;
+                               goto free_memory;
+
+                       }
+
+                       /* Allocate memory for CID array */
+                       ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
+                                                 BEISCSI_GET_CID_COUNT(phba,
+                                                 ulp_num), GFP_KERNEL);
+                       if (!ptr_cid_info->cid_array) {
+                               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                                           "BM_%d : Failed to allocate memory "
+                                           "for CID_ARRAY for ULP : %d\n",
+                                           ulp_num);
+                               kfree(ptr_cid_info);
+                               ptr_cid_info = NULL;
+                               ret = -ENOMEM;
+
+                               goto free_memory;
+                       }
+                       ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
+                                                  phba, ulp_num);
+
+                       /* Save the cid_info_array ptr */
+                       phba->cid_array_info[ulp_num] = ptr_cid_info;
+               }
        }
        phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
                                 phba->params.cxns_per_ctrl, GFP_KERNEL);
@@ -3835,9 +4201,9 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in "
                            "hba_setup_cid_tbls\n");
-               kfree(phba->cid_array);
-               phba->cid_array = NULL;
-               return -ENOMEM;
+               ret = -ENOMEM;
+
+               goto free_memory;
        }
 
        phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
@@ -3847,18 +4213,44 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                            "BM_%d : Failed to allocate memory in"
                            "hba_setup_cid_tbls\n");
 
-               kfree(phba->cid_array);
                kfree(phba->ep_array);
-               phba->cid_array = NULL;
                phba->ep_array = NULL;
-               return -ENOMEM;
+               ret = -ENOMEM;
+       }
+
+       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+               ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
+
+               ptr_cid_info = phba->cid_array_info[ulp_num];
+               ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
+                       phba->phwi_ctrlr->wrb_context[i].cid;
+
        }
 
-       for (i = 0; i < phba->params.cxns_per_ctrl; i++)
-               phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+                       ptr_cid_info = phba->cid_array_info[ulp_num];
 
-       phba->avlbl_cids = phba->params.cxns_per_ctrl;
+                       ptr_cid_info->cid_alloc = 0;
+                       ptr_cid_info->cid_free = 0;
+               }
+       }
        return 0;
+
+free_memory:
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+                       ptr_cid_info = phba->cid_array_info[ulp_num];
+
+                       if (ptr_cid_info) {
+                               kfree(ptr_cid_info->cid_array);
+                               kfree(ptr_cid_info);
+                               phba->cid_array_info[ulp_num] = NULL;
+                       }
+               }
+       }
+
+       return ret;
 }
 
 static void hwi_enable_intr(struct beiscsi_hba *phba)
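
hba_setup_cid_tbls() above replaces the single cid_array with one ulp_cid_info per supported ULP, each holding its own CID array plus alloc/free cursors and an available count. The allocation helpers themselves are outside this hunk; a minimal sketch of how such a pool is assumed to hand out and reclaim CIDs (function names are illustrative):

        /* Sketch: per-ULP CID pool as a ring with separate cursors. */
        static uint16_t cid_pool_get(struct ulp_cid_info *info, uint16_t cid_count)
        {
                uint16_t cid = info->cid_array[info->cid_alloc];

                if (++info->cid_alloc == cid_count)
                        info->cid_alloc = 0;
                info->avlbl_cids--;
                return cid;
        }

        static void cid_pool_put(struct ulp_cid_info *info, uint16_t cid,
                                 uint16_t cid_count)
        {
                info->cid_array[info->cid_free] = cid;
                if (++info->cid_free == cid_count)
                        info->cid_free = 0;
                info->avlbl_cids++;
        }
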
@@ -4113,20 +4505,39 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
 
 static void beiscsi_clean_port(struct beiscsi_hba *phba)
 {
-       int mgmt_status;
-
-       mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
-       if (mgmt_status)
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-                           "BM_%d : mgmt_epfw_cleanup FAILED\n");
+       int mgmt_status, ulp_num;
+       struct ulp_cid_info *ptr_cid_info = NULL;
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+                       mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
+                       if (mgmt_status)
+                               beiscsi_log(phba, KERN_WARNING,
+                                           BEISCSI_LOG_INIT,
+                                           "BM_%d : mgmt_epfw_cleanup FAILED"
+                                           " for ULP_%d\n", ulp_num);
+               }
+       }
 
        hwi_purge_eq(phba);
        hwi_cleanup(phba);
        kfree(phba->io_sgl_hndl_base);
        kfree(phba->eh_sgl_hndl_base);
-       kfree(phba->cid_array);
        kfree(phba->ep_array);
        kfree(phba->conn_table);
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+                       ptr_cid_info = phba->cid_array_info[ulp_num];
+
+                       if (ptr_cid_info) {
+                               kfree(ptr_cid_info->cid_array);
+                               kfree(ptr_cid_info);
+                               phba->cid_array_info[ulp_num] = NULL;
+                       }
+               }
+       }
+
 }
 
 /**
@@ -4255,8 +4666,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
        doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
                             << DB_DEF_PDU_WRB_INDEX_SHIFT;
        doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-
-       iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+       iowrite32(doorbell, phba->db_va +
+                 beiscsi_conn->doorbell_offset);
 }
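
(Editorial illustration, not part of the patch: this hunk, and the three identical ones below, stop writing the TX doorbell at the fixed DB_TXULP0_OFFSET and instead use the per-connection beiscsi_conn->doorbell_offset, since the register now depends on which ULP the connection's CID is bound to. The userspace sketch below only mimics the idea of composing a doorbell word and writing it at a per-connection offset; the masks, shifts, and the fake BAR are placeholders, not the adapter's real layout.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative placeholders, not the adapter's real register layout. */
#define WRB_INDEX_MASK          0xFFFFu
#define WRB_INDEX_SHIFT         16
#define NUM_POSTED_SHIFT        24

struct fake_conn {
        uint32_t cid;
        uint32_t doorbell_offset;       /* chosen per connection at offload time */
};

/* Stand-in for the mapped doorbell BAR (db_va in the driver). */
static uint8_t db_bar[4096];

static void ring_tx_doorbell(struct fake_conn *conn,
                             uint16_t wrb_index, uint8_t num_posted)
{
        uint32_t doorbell = conn->cid & 0xFFFFu;

        doorbell |= ((uint32_t)wrb_index & WRB_INDEX_MASK) << WRB_INDEX_SHIFT;
        doorbell |= (uint32_t)num_posted << NUM_POSTED_SHIFT;

        /* In the driver this is iowrite32(doorbell, db_va + offset). */
        memcpy(db_bar + conn->doorbell_offset, &doorbell, sizeof(doorbell));
        printf("cid %u: 0x%08x written at BAR offset 0x%x\n",
               (unsigned)conn->cid, (unsigned)doorbell,
               (unsigned)conn->doorbell_offset);
}

int main(void)
{
        struct fake_conn c0 = { .cid = 5,  .doorbell_offset = 0x120 };
        struct fake_conn c1 = { .cid = 17, .doorbell_offset = 0x160 };

        ring_tx_doorbell(&c0, 3, 1);
        ring_tx_doorbell(&c1, 8, 1);
        return 0;
}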
 
 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4481,7 +4892,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
                     DB_DEF_PDU_WRB_INDEX_MASK) <<
                     DB_DEF_PDU_WRB_INDEX_SHIFT;
        doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-       iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+       iowrite32(doorbell, phba->db_va +
+                 beiscsi_conn->doorbell_offset);
        return 0;
 }
 
@@ -4536,7 +4948,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
                     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
        doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
 
-       iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+       iowrite32(doorbell, phba->db_va +
+                 beiscsi_conn->doorbell_offset);
        return 0;
 }
 
@@ -4638,7 +5051,8 @@ static int beiscsi_mtask(struct iscsi_task *task)
        doorbell |= (io_task->pwrb_handle->wrb_index &
                     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
        doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-       iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+       iowrite32(doorbell, phba->db_va +
+                 beiscsi_conn->doorbell_offset);
        return 0;
 }
 
@@ -4663,8 +5077,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
                struct beiscsi_hba *phba = NULL;
 
                phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
-                           "BM_%d : scsi_dma_map Failed\n");
+               beiscsi_log(phba, KERN_ERR,
+                           BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
+                           "BM_%d : scsi_dma_map Failed "
+                           "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
+                           be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
+                           io_task->libiscsi_itt, scsi_bufflen(sc));
 
                return num_sg;
        }
@@ -4769,10 +5187,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
 /*
  * beiscsi_quiesce()- Cleanup Driver resources
  * @phba: Instance Priv structure
+ * @unload_state: Clean or EEH unload state
  *
  * Free the OS and HW resources held by the driver
  **/
-static void beiscsi_quiesce(struct beiscsi_hba *phba)
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+               uint32_t unload_state)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
@@ -4785,28 +5205,37 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
        if (phba->msix_enabled) {
                for (i = 0; i <= phba->num_cpus; i++) {
                        msix_vec = phba->msix_entries[i].vector;
+                       synchronize_irq(msix_vec);
                        free_irq(msix_vec, &phwi_context->be_eq[i]);
                        kfree(phba->msi_name[i]);
                }
        } else
-               if (phba->pcidev->irq)
+               if (phba->pcidev->irq) {
+                       synchronize_irq(phba->pcidev->irq);
                        free_irq(phba->pcidev->irq, phba);
+               }
        pci_disable_msix(phba->pcidev);
-       destroy_workqueue(phba->wq);
+
        if (blk_iopoll_enabled)
                for (i = 0; i < phba->num_cpus; i++) {
                        pbe_eq = &phwi_context->be_eq[i];
                        blk_iopoll_disable(&pbe_eq->iopoll);
                }
 
-       beiscsi_clean_port(phba);
-       beiscsi_free_mem(phba);
+       if (unload_state == BEISCSI_CLEAN_UNLOAD) {
+               destroy_workqueue(phba->wq);
+               beiscsi_clean_port(phba);
+               beiscsi_free_mem(phba);
 
-       beiscsi_unmap_pci_function(phba);
-       pci_free_consistent(phba->pcidev,
-                           phba->ctrl.mbox_mem_alloced.size,
-                           phba->ctrl.mbox_mem_alloced.va,
-                           phba->ctrl.mbox_mem_alloced.dma);
+               beiscsi_unmap_pci_function(phba);
+               pci_free_consistent(phba->pcidev,
+                                   phba->ctrl.mbox_mem_alloced.size,
+                                   phba->ctrl.mbox_mem_alloced.va,
+                                   phba->ctrl.mbox_mem_alloced.dma);
+       } else {
+               hwi_purge_eq(phba);
+               hwi_cleanup(phba);
+       }
 
        cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
 }
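
(Editorial illustration, not part of the patch: beiscsi_quiesce() now takes an unload_state so the same IRQ/iopoll teardown serves both a normal unload and the EEH error path; only the clean path frees host memory and the mailbox DMA, while the EEH path just purges and cleans the hardware queues, presumably so the resume handler can re-initialize them after the slot reset. A compact sketch of that branching teardown, with made-up types and helpers.)

#include <stdbool.h>
#include <stdio.h>

enum unload_state { CLEAN_UNLOAD, EEH_UNLOAD };

struct fake_hba { bool irqs_active; bool mem_allocated; };

static void free_irqs(struct fake_hba *h)     { h->irqs_active = false; }
static void purge_queues(struct fake_hba *h)  { (void)h; puts("queues purged"); }
static void free_host_mem(struct fake_hba *h) { h->mem_allocated = false; }

static void quiesce(struct fake_hba *hba, enum unload_state state)
{
        /* Common to both paths: stop interrupts and polling. */
        free_irqs(hba);

        if (state == CLEAN_UNLOAD) {
                /* Normal remove/shutdown: give everything back. */
                purge_queues(hba);
                free_host_mem(hba);
        } else {
                /* EEH: keep host memory so resume can re-init after reset. */
                purge_queues(hba);
        }
}

int main(void)
{
        struct fake_hba hba = { .irqs_active = true, .mem_allocated = true };

        quiesce(&hba, EEH_UNLOAD);
        printf("after EEH quiesce: mem still allocated = %d\n", hba.mem_allocated);
        quiesce(&hba, CLEAN_UNLOAD);
        printf("after clean quiesce: mem still allocated = %d\n", hba.mem_allocated);
        return 0;
}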
@@ -4823,11 +5252,13 @@ static void beiscsi_remove(struct pci_dev *pcidev)
        }
 
        beiscsi_destroy_def_ifaces(phba);
-       beiscsi_quiesce(phba);
+       beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
        iscsi_boot_destroy_kset(phba->boot_kset);
        iscsi_host_remove(phba->shost);
        pci_dev_put(phba->pcidev);
        iscsi_host_free(phba->shost);
+       pci_disable_pcie_error_reporting(pcidev);
+       pci_set_drvdata(pcidev, NULL);
        pci_disable_device(pcidev);
 }
 
@@ -4842,7 +5273,7 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
                return;
        }
 
-       beiscsi_quiesce(phba);
+       beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
        pci_disable_device(pcidev);
 }
 
@@ -4880,6 +5311,167 @@ beiscsi_hw_health_check(struct work_struct *work)
                              msecs_to_jiffies(1000));
 }
 
+
+static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+               pci_channel_state_t state)
+{
+       struct beiscsi_hba *phba = NULL;
+
+       phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+       phba->state |= BE_ADAPTER_PCI_ERR;
+
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                   "BM_%d : EEH error detected\n");
+
+       beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+
+       if (state == pci_channel_io_perm_failure) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : EEH : State PERM Failure");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_disable_device(pdev);
+
+       /* The error could cause the FW to trigger a flash debug dump.
+        * Resetting the card while flash dump is in progress
+        * can cause it not to recover; wait for it to finish.
+        * Wait only for first function as it is needed only once per
+        * adapter.
+        **/
+       if (pdev->devfn == 0)
+               ssleep(30);
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
+{
+       struct beiscsi_hba *phba = NULL;
+       int status = 0;
+
+       phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                   "BM_%d : EEH Reset\n");
+
+       status = pci_enable_device(pdev);
+       if (status)
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       pci_set_master(pdev);
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+
+       /* Wait for the CHIP Reset to complete */
+       status = be_chk_reset_complete(phba);
+       if (!status) {
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : EEH Reset Completed\n");
+       } else {
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : EEH Reset Completion Failure\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void beiscsi_eeh_resume(struct pci_dev *pdev)
+{
+       int ret = 0, i;
+       struct be_eq_obj *pbe_eq;
+       struct beiscsi_hba *phba = NULL;
+       struct hwi_controller *phwi_ctrlr;
+       struct hwi_context_memory *phwi_context;
+
+       phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+       pci_save_state(pdev);
+
+       if (enable_msix)
+               find_num_cpus(phba);
+       else
+               phba->num_cpus = 1;
+
+       if (enable_msix) {
+               beiscsi_msix_enable(phba);
+               if (!phba->msix_enabled)
+                       phba->num_cpus = 1;
+       }
+
+       ret = beiscsi_cmd_reset_function(phba);
+       if (ret) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Reset Failed\n");
+               goto ret_err;
+       }
+
+       ret = be_chk_reset_complete(phba);
+       if (ret) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to get out of reset.\n");
+               goto ret_err;
+       }
+
+       beiscsi_get_params(phba);
+       phba->shost->max_id = phba->params.cxns_per_ctrl;
+       phba->shost->can_queue = phba->params.ios_per_ctrl;
+       ret = hwi_init_controller(phba);
+
+       for (i = 0; i < MAX_MCC_CMD; i++) {
+               init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+               phba->ctrl.mcc_tag[i] = i + 1;
+               phba->ctrl.mcc_numtag[i + 1] = 0;
+               phba->ctrl.mcc_tag_available++;
+       }
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_context = phwi_ctrlr->phwi_ctxt;
+
+       if (blk_iopoll_enabled) {
+               for (i = 0; i < phba->num_cpus; i++) {
+                       pbe_eq = &phwi_context->be_eq[i];
+                       blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+                                       be_iopoll);
+                       blk_iopoll_enable(&pbe_eq->iopoll);
+               }
+
+               i = (phba->msix_enabled) ? i : 0;
+               /* Work item for MCC handling */
+               pbe_eq = &phwi_context->be_eq[i];
+               INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+       } else {
+               if (phba->msix_enabled) {
+                       for (i = 0; i <= phba->num_cpus; i++) {
+                               pbe_eq = &phwi_context->be_eq[i];
+                               INIT_WORK(&pbe_eq->work_cqs,
+                                         beiscsi_process_all_cqs);
+                       }
+               } else {
+                       pbe_eq = &phwi_context->be_eq[0];
+                       INIT_WORK(&pbe_eq->work_cqs,
+                                 beiscsi_process_all_cqs);
+               }
+       }
+
+       ret = beiscsi_init_irqs(phba);
+       if (ret < 0) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : beiscsi_eeh_resume - "
+                           "Failed to beiscsi_init_irqs\n");
+               goto ret_err;
+       }
+
+       hwi_enable_intr(phba);
+       phba->state &= ~BE_ADAPTER_PCI_ERR;
+
+       return;
+ret_err:
+       beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                   "BM_%d : AER EEH Resume Failed\n");
+}
+
 static int beiscsi_dev_probe(struct pci_dev *pcidev,
                             const struct pci_device_id *id)
 {
@@ -4887,7 +5479,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        struct be_eq_obj *pbe_eq;
-       int ret, i;
+       int ret = 0, i;
 
        ret = beiscsi_enable_pci(pcidev);
        if (ret < 0) {
@@ -4903,10 +5495,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                goto disable_pci;
        }
 
+       /* Enable EEH reporting */
+       ret = pci_enable_pcie_error_reporting(pcidev);
+       if (ret)
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+                           "BM_%d : PCIe Error Reporting "
+                           "Enabling Failed\n");
+
+       pci_save_state(pcidev);
+
        /* Initialize Driver configuration Parameters */
        beiscsi_hba_attrs_init(phba);
 
        phba->fw_timeout = false;
+       phba->mac_addr_set = false;
 
 
        switch (pcidev->device) {
@@ -4929,20 +5531,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                phba->generation = 0;
        }
 
-       if (enable_msix)
-               find_num_cpus(phba);
-       else
-               phba->num_cpus = 1;
-
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BM_%d : num_cpus = %d\n",
-                   phba->num_cpus);
-
-       if (enable_msix) {
-               beiscsi_msix_enable(phba);
-               if (!phba->msix_enabled)
-                       phba->num_cpus = 1;
-       }
        ret = be_ctrl_init(phba, pcidev);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4954,27 +5542,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        ret = beiscsi_cmd_reset_function(phba);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Reset Failed. Aborting Crashdump\n");
+                           "BM_%d : Reset Failed\n");
                goto hba_free;
        }
        ret = be_chk_reset_complete(phba);
        if (ret) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Failed to get out of reset."
-                           "Aborting Crashdump\n");
+                           "BM_%d : Failed to get out of reset.\n");
                goto hba_free;
        }
 
        spin_lock_init(&phba->io_sgl_lock);
        spin_lock_init(&phba->mgmt_sgl_lock);
        spin_lock_init(&phba->isr_lock);
+       spin_lock_init(&phba->async_pdu_lock);
        ret = mgmt_get_fw_config(&phba->ctrl, phba);
        if (ret != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Error getting fw config\n");
                goto free_port;
        }
-       phba->shost->max_id = phba->fw_config.iscsi_cid_count;
+
+       if (enable_msix)
+               find_num_cpus(phba);
+       else
+               phba->num_cpus = 1;
+
+       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                   "BM_%d : num_cpus = %d\n",
+                   phba->num_cpus);
+
+       if (enable_msix) {
+               beiscsi_msix_enable(phba);
+               if (!phba->msix_enabled)
+                       phba->num_cpus = 1;
+       }
+
+       phba->shost->max_id = phba->params.cxns_per_ctrl;
        beiscsi_get_params(phba);
        phba->shost->can_queue = phba->params.ios_per_ctrl;
        ret = beiscsi_init_port(phba);
@@ -4985,7 +5589,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                goto free_port;
        }
 
-       for (i = 0; i < MAX_MCC_CMD ; i++) {
+       for (i = 0; i < MAX_MCC_CMD; i++) {
                init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
                phba->ctrl.mcc_tag[i] = i + 1;
                phba->ctrl.mcc_numtag[i + 1] = 0;
@@ -5089,6 +5693,12 @@ disable_pci:
        return ret;
 }
 
+static struct pci_error_handlers beiscsi_eeh_handlers = {
+       .error_detected = beiscsi_eeh_err_detected,
+       .slot_reset = beiscsi_eeh_reset,
+       .resume = beiscsi_eeh_resume,
+};
+
 struct iscsi_transport beiscsi_iscsi_transport = {
        .owner = THIS_MODULE,
        .name = DRV_NAME,
@@ -5127,7 +5737,8 @@ static struct pci_driver beiscsi_pci_driver = {
        .probe = beiscsi_dev_probe,
        .remove = beiscsi_remove,
        .shutdown = beiscsi_shutdown,
-       .id_table = beiscsi_pci_id_table
+       .id_table = beiscsi_pci_id_table,
+       .err_handler = &beiscsi_eeh_handlers
 };
 
 
index 2c06ef3c02aca49974f6bee2487f09b98c2b1d4d..31fa27b4a9b225a340ee1ce44445a9a09bd73575 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/in.h>
 #include <linux/ctype.h>
 #include <linux/module.h>
+#include <linux/aer.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -34,9 +35,8 @@
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
 
-#include "be.h"
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.0.467.0"
+#define BUILD_STR              "10.0.659.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -66,7 +66,6 @@
 
 #define MAX_CPUS               64
 #define BEISCSI_MAX_NUM_CPUS   7
-#define OC_SKH_MAX_NUM_CPUS    31
 
 #define BEISCSI_VER_STRLEN 32
 
@@ -74,6 +73,7 @@
 
 #define BEISCSI_CMD_PER_LUN    128     /* scsi_host->cmd_per_lun */
 #define BEISCSI_MAX_SECTORS    2048    /* scsi_host->max_sectors */
+#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
 
 #define BEISCSI_MAX_CMD_LEN    16      /* scsi_host->max_cmd_len */
 #define BEISCSI_NUM_MAX_LUN    256     /* scsi_host->max_lun */
 
 #define INVALID_SESS_HANDLE    0xFFFFFFFF
 
-#define BE_ADAPTER_UP          0x00000000
-#define BE_ADAPTER_LINK_DOWN   0x00000001
+#define BE_ADAPTER_LINK_UP     0x001
+#define BE_ADAPTER_LINK_DOWN   0x002
+#define BE_ADAPTER_PCI_ERR     0x004
+
+#define BEISCSI_CLEAN_UNLOAD   0x01
+#define BEISCSI_EEH_UNLOAD     0x02
 /**
  * hardware needs the async PDU buffers to be posted in multiples of 8
  * So have at least 8 of them by default
  */
 
-#define HWI_GET_ASYNC_PDU_CTX(phwi)    (phwi->phwi_ctxt->pasync_ctx)
+#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num)   \
+       (phwi->phwi_ctxt->pasync_ctx[ulp_num])
 
 /********* Memory BAR register ************/
 #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET     0xfc
 #define DB_CQ_REARM_SHIFT              (29)    /* bit 29 */
 
 #define GET_HWI_CONTROLLER_WS(pc)      (pc->phwi_ctrlr)
-#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\
-               (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id)
-#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\
-               (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id)
+#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+               (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id)
+#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\
+               (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)
 
 #define PAGES_REQUIRED(x) \
        ((x < PAGE_SIZE) ? 1 :  ((x + PAGE_SIZE - 1) / PAGE_SIZE))
 
 #define BEISCSI_MSI_NAME 20 /* size of msi_name string */
 
+#define MEM_DESCR_OFFSET 8
+#define BEISCSI_DEFQ_HDR 1
+#define BEISCSI_DEFQ_DATA 0
 enum be_mem_enum {
        HWI_MEM_ADDN_CONTEXT,
        HWI_MEM_WRB,
        HWI_MEM_WRBH,
        HWI_MEM_SGLH,
        HWI_MEM_SGE,
-       HWI_MEM_ASYNC_HEADER_BUF,       /* 5 */
-       HWI_MEM_ASYNC_DATA_BUF,
-       HWI_MEM_ASYNC_HEADER_RING,
-       HWI_MEM_ASYNC_DATA_RING,
-       HWI_MEM_ASYNC_HEADER_HANDLE,
-       HWI_MEM_ASYNC_DATA_HANDLE,      /* 10 */
-       HWI_MEM_ASYNC_PDU_CONTEXT,
+       HWI_MEM_TEMPLATE_HDR_ULP0,
+       HWI_MEM_ASYNC_HEADER_BUF_ULP0,  /* 6 */
+       HWI_MEM_ASYNC_DATA_BUF_ULP0,
+       HWI_MEM_ASYNC_HEADER_RING_ULP0,
+       HWI_MEM_ASYNC_DATA_RING_ULP0,
+       HWI_MEM_ASYNC_HEADER_HANDLE_ULP0,
+       HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */
+       HWI_MEM_ASYNC_PDU_CONTEXT_ULP0,
+       HWI_MEM_TEMPLATE_HDR_ULP1,
+       HWI_MEM_ASYNC_HEADER_BUF_ULP1,  /* 14 */
+       HWI_MEM_ASYNC_DATA_BUF_ULP1,
+       HWI_MEM_ASYNC_HEADER_RING_ULP1,
+       HWI_MEM_ASYNC_DATA_RING_ULP1,
+       HWI_MEM_ASYNC_HEADER_HANDLE_ULP1,
+       HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */
+       HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,
        ISCSI_MEM_GLOBAL_HEADER,
        SE_MEM_MAX
 };
@@ -266,9 +283,49 @@ struct invalidate_command_table {
        unsigned short cid;
 } __packed;
 
+#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
+       (phwi_ctrlr->wrb_context[cri].ulp_num)
+struct hwi_wrb_context {
+       struct list_head wrb_handle_list;
+       struct list_head wrb_handle_drvr_list;
+       struct wrb_handle **pwrb_handle_base;
+       struct wrb_handle **pwrb_handle_basestd;
+       struct iscsi_wrb *plast_wrb;
+       unsigned short alloc_index;
+       unsigned short free_index;
+       unsigned short wrb_handles_available;
+       unsigned short cid;
+       uint8_t ulp_num;        /* ULP to which the CID is bound */
+       uint16_t register_set;
+       uint16_t doorbell_format;
+       uint32_t doorbell_offset;
+};
+
+struct ulp_cid_info {
+       unsigned short *cid_array;
+       unsigned short avlbl_cids;
+       unsigned short cid_alloc;
+       unsigned short cid_free;
+};
+
+#include "be.h"
 #define chip_be2(phba)      (phba->generation == BE_GEN2)
 #define chip_be3_r(phba)    (phba->generation == BE_GEN3)
 #define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
+
+#define BEISCSI_ULP0    0
+#define BEISCSI_ULP1    1
+#define BEISCSI_ULP_COUNT   2
+#define BEISCSI_ULP0_LOADED 0x01
+#define BEISCSI_ULP1_LOADED 0x02
+
+#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \
+       (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids)
+#define BEISCSI_ULP0_AVLBL_CID(phba) \
+       BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0)
+#define BEISCSI_ULP1_AVLBL_CID(phba) \
+       BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1)
+
 struct beiscsi_hba {
        struct hba_parameters params;
        struct hwi_controller *phwi_ctrlr;
@@ -303,17 +360,15 @@ struct beiscsi_hba {
        spinlock_t io_sgl_lock;
        spinlock_t mgmt_sgl_lock;
        spinlock_t isr_lock;
+       spinlock_t async_pdu_lock;
        unsigned int age;
-       unsigned short avlbl_cids;
-       unsigned short cid_alloc;
-       unsigned short cid_free;
        struct list_head hba_queue;
 #define BE_MAX_SESSION 2048
 #define BE_SET_CID_TO_CRI(cri_index, cid) \
                          (phba->cid_to_cri_map[cid] = cri_index)
 #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
        unsigned short cid_to_cri_map[BE_MAX_SESSION];
-       unsigned short *cid_array;
+       struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
        struct iscsi_endpoint **ep_array;
        struct beiscsi_conn **conn_table;
        struct iscsi_boot_kset *boot_kset;
@@ -325,20 +380,21 @@ struct beiscsi_hba {
                 * group together since they are used most frequently
                 * for cid to cri conversion
                 */
-               unsigned int iscsi_cid_start;
                unsigned int phys_port;
+               unsigned int eqid_count;
+               unsigned int cqid_count;
+               unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
+#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \
+               (phba->fw_config.iscsi_cid_count[ulp_num])
+               unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT];
+               unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT];
+               unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT];
+               unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT];
+               unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT];
 
-               unsigned int isr_offset;
-               unsigned int iscsi_icd_start;
-               unsigned int iscsi_cid_count;
-               unsigned int iscsi_icd_count;
-               unsigned int pci_function;
-
-               unsigned short cid_alloc;
-               unsigned short cid_free;
-               unsigned short avlbl_cids;
                unsigned short iscsi_features;
-               spinlock_t cid_lock;
+               uint16_t dual_ulp_aware;
+               unsigned long ulp_supported;
        } fw_config;
 
        unsigned int state;
@@ -346,6 +402,7 @@ struct beiscsi_hba {
        bool ue_detected;
        struct delayed_work beiscsi_hw_check_task;
 
+       bool mac_addr_set;
        u8 mac_address[ETH_ALEN];
        char fw_ver_str[BEISCSI_VER_STRLEN];
        char wq_name[20];
@@ -374,6 +431,7 @@ struct beiscsi_conn {
        struct iscsi_conn *conn;
        struct beiscsi_hba *phba;
        u32 exp_statsn;
+       u32 doorbell_offset;
        u32 beiscsi_conn_cid;
        struct beiscsi_endpoint *ep;
        unsigned short login_in_progress;
@@ -474,7 +532,7 @@ struct amap_iscsi_sge {
 };
 
 struct beiscsi_offload_params {
-       u32 dw[5];
+       u32 dw[6];
 };
 
 #define OFFLD_PARAMS_ERL       0x00000003
@@ -504,6 +562,7 @@ struct amap_beiscsi_offload_params {
        u8 max_r2t[16];
        u8 pad[8];
        u8 exp_statsn[32];
+       u8 max_recv_data_segment_length[32];
 };
 
 /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
@@ -567,7 +626,8 @@ struct hwi_async_pdu_context {
 
        unsigned int buffer_size;
        unsigned int num_entries;
-
+#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
+       unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
        /**
         * This is a varying size list! Do not add anything
         * after this entry!!
@@ -885,30 +945,32 @@ struct amap_iscsi_target_context_update_wrb_v2 {
        u8 first_burst_length[24]; /* DWORD 3 */
        u8 rsvd3[8]; /* DOWRD 3 */
        u8 max_r2t[16]; /* DWORD 4 */
-       u8 rsvd4[10];   /* DWORD 4 */
+       u8 rsvd4;       /* DWORD 4 */
        u8 hde;         /* DWORD 4 */
        u8 dde;         /* DWORD 4 */
        u8 erl[2];      /* DWORD 4 */
+       u8 rsvd5[6];    /* DWORD 4 */
        u8 imd;         /* DWORD 4 */
        u8 ir2t;        /* DWORD 4 */
+       u8 rsvd6[3];    /* DWORD 4 */
        u8 stat_sn[32];     /* DWORD 5 */
-       u8 rsvd5[32];   /* DWORD 6 */
-       u8 rsvd6[32];   /* DWORD 7 */
+       u8 rsvd7[32];   /* DWORD 6 */
+       u8 rsvd8[32];   /* DWORD 7 */
        u8 max_recv_dataseg_len[24];    /* DWORD 8 */
-       u8 rsvd7[8]; /* DWORD 8 */
-       u8 rsvd8[32];   /* DWORD 9 */
-       u8 rsvd9[32];   /* DWORD 10 */
+       u8 rsvd9[8]; /* DWORD 8 */
+       u8 rsvd10[32];   /* DWORD 9 */
+       u8 rsvd11[32];   /* DWORD 10 */
        u8 max_cxns[16]; /* DWORD 11 */
-       u8 rsvd10[11]; /* DWORD  11*/
+       u8 rsvd12[11]; /* DWORD  11*/
        u8 invld; /* DWORD 11 */
-       u8 rsvd11;/* DWORD 11*/
+       u8 rsvd13;/* DWORD 11*/
        u8 dmsg; /* DWORD 11 */
        u8 data_seq_inorder; /* DWORD 11 */
        u8 pdu_seq_inorder; /* DWORD 11 */
-       u8 rsvd12[32]; /*DWORD 12 */
-       u8 rsvd13[32]; /* DWORD 13 */
-       u8 rsvd14[32]; /* DWORD 14 */
-       u8 rsvd15[32]; /* DWORD 15 */
+       u8 rsvd14[32]; /*DWORD 12 */
+       u8 rsvd15[32]; /* DWORD 13 */
+       u8 rsvd16[32]; /* DWORD 14 */
+       u8 rsvd17[32]; /* DWORD 15 */
 } __packed;
 
 
@@ -919,6 +981,10 @@ struct be_ring {
        u32 cidx;               /* consumer index */
        u32 pidx;               /* producer index -- not used by most rings */
        u32 item_size;          /* size in bytes of one object */
+       u8 ulp_num;     /* ULP to which the CID is bound */
+       u16 register_set;
+       u16 doorbell_format;
+       u32 doorbell_offset;
 
        void *va;               /* The virtual address of the ring.  This
                                 * should be last to allow 32 & 64 bit debugger
@@ -926,18 +992,6 @@ struct be_ring {
                                 */
 };
 
-struct hwi_wrb_context {
-       struct list_head wrb_handle_list;
-       struct list_head wrb_handle_drvr_list;
-       struct wrb_handle **pwrb_handle_base;
-       struct wrb_handle **pwrb_handle_basestd;
-       struct iscsi_wrb *plast_wrb;
-       unsigned short alloc_index;
-       unsigned short free_index;
-       unsigned short wrb_handles_available;
-       unsigned short cid;
-};
-
 struct hwi_controller {
        struct list_head io_sgl_list;
        struct list_head eh_sgl_list;
@@ -946,8 +1000,8 @@ struct hwi_controller {
 
        struct hwi_wrb_context *wrb_context;
        struct mcc_wrb *pmcc_wrb_base;
-       struct be_ring default_pdu_hdr;
-       struct be_ring default_pdu_data;
+       struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
+       struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
        struct hwi_context_memory *phwi_ctxt;
 };
 
@@ -978,11 +1032,10 @@ struct hwi_context_memory {
        struct be_eq_obj be_eq[MAX_CPUS];
        struct be_queue_info be_cq[MAX_CPUS - 1];
 
-       struct be_queue_info be_def_hdrq;
-       struct be_queue_info be_def_dataq;
-
        struct be_queue_info *be_wrbq;
-       struct hwi_async_pdu_context *pasync_ctx;
+       struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
+       struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
+       struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
 };
 
 /* Logging related definitions */
@@ -992,6 +1045,7 @@ struct hwi_context_memory {
 #define BEISCSI_LOG_EH         0x0008  /* Error Handler */
 #define BEISCSI_LOG_IO         0x0010  /* IO Code Path */
 #define BEISCSI_LOG_CONFIG     0x0020  /* CONFIG Code Path */
+#define BEISCSI_LOG_ISCSI      0x0040  /* SCSI/iSCSI Protocol related Logs */
 
 #define beiscsi_log(phba, level, mask, fmt, arg...) \
 do { \
index 245a9595a93a131449fd660a7b7d8bd0e9e7f3b5..b2fcac78feaa995430fe62e4cd2587735313dabb 100644 (file)
@@ -278,6 +278,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
        return tag;
 }
 
+/**
+ * mgmt_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ **/
 int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
                                struct beiscsi_hba *phba)
 {
@@ -291,31 +303,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                          OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+                          OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+                          EMBED_MBX_MAX_PAYLOAD_SIZE);
        status = be_mbox_notify(ctrl);
        if (!status) {
+               uint8_t ulp_num = 0;
                struct be_fw_cfg *pfw_cfg;
                pfw_cfg = req;
+
+               if (!is_chip_be2_be3r(phba)) {
+                       phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+                       phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+
+                       beiscsi_log(phba, KERN_INFO,
+                                   BEISCSI_LOG_INIT,
+                                   "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+                                   phba->fw_config.eqid_count,
+                                   phba->fw_config.cqid_count);
+               }
+
+               for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+                       if (pfw_cfg->ulp[ulp_num].ulp_mode &
+                           BEISCSI_ULP_ISCSI_INI_MODE)
+                               set_bit(ulp_num,
+                               &phba->fw_config.ulp_supported);
+
                phba->fw_config.phys_port = pfw_cfg->phys_port;
-               phba->fw_config.iscsi_icd_start =
-                                       pfw_cfg->ulp[0].icd_base;
-               phba->fw_config.iscsi_icd_count =
-                                       pfw_cfg->ulp[0].icd_count;
-               phba->fw_config.iscsi_cid_start =
-                                       pfw_cfg->ulp[0].sq_base;
-               phba->fw_config.iscsi_cid_count =
-                                       pfw_cfg->ulp[0].sq_count;
-               if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
-                       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                                   "BG_%d : FW reported MAX CXNS as %d\t"
-                                   "Max Supported = %d.\n",
-                                   phba->fw_config.iscsi_cid_count,
-                                   BE2_MAX_SESSIONS);
-                       phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+               for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+                       if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+                               phba->fw_config.iscsi_cid_start[ulp_num] =
+                                       pfw_cfg->ulp[ulp_num].sq_base;
+                               phba->fw_config.iscsi_cid_count[ulp_num] =
+                                       pfw_cfg->ulp[ulp_num].sq_count;
+
+                               phba->fw_config.iscsi_icd_start[ulp_num] =
+                                       pfw_cfg->ulp[ulp_num].icd_base;
+                               phba->fw_config.iscsi_icd_count[ulp_num] =
+                                       pfw_cfg->ulp[ulp_num].icd_count;
+
+                               phba->fw_config.iscsi_chain_start[ulp_num] =
+                                       pfw_cfg->chain_icd[ulp_num].chain_base;
+                               phba->fw_config.iscsi_chain_count[ulp_num] =
+                                       pfw_cfg->chain_icd[ulp_num].chain_count;
+
+                               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                                           "BG_%d : Function loaded on ULP : %d\n"
+                                           "\tiscsi_cid_count : %d\n"
+                                           "\tiscsi_cid_start : %d\n"
+                                           "\t iscsi_icd_count : %d\n"
+                                           "\t iscsi_icd_start : %d\n",
+                                           ulp_num,
+                                           phba->fw_config.
+                                           iscsi_cid_count[ulp_num],
+                                           phba->fw_config.
+                                           iscsi_cid_start[ulp_num],
+                                           phba->fw_config.
+                                           iscsi_icd_count[ulp_num],
+                                           phba->fw_config.
+                                           iscsi_icd_start[ulp_num]);
+                       }
                }
+
+               phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+                                                 BEISCSI_FUNC_DUA_MODE);
+
+               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+                           "BG_%d : DUA Mode : 0x%x\n",
+                           phba->fw_config.dual_ulp_aware);
+
        } else {
-               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BG_%d : Failed in mgmt_get_fw_config\n");
+               status = -EINVAL;
        }
 
        spin_unlock(&ctrl->mbox_lock);
@@ -448,7 +508,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
        return tag;
 }
 
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
+/**
+ * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp_num: ULP number.
+ *
+ * return
+ *     Success: 0
+ *     Failure: Non-Zero Value
+ **/
+int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
@@ -462,9 +531,9 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
 
-       req->chute = chute;
-       req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
-       req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));
+       req->chute = (1 << ulp_num);
+       req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
+       req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
 
        status =  be_mcc_notify_wait(phba);
        if (status)
@@ -585,6 +654,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
        return tag;
 }
 
+/**
+ * mgmt_open_connection()- Establish a TCP CXN
+ * @dst_addr: Destination Address
+ * @beiscsi_ep: ptr to device endpoint struct
+ * @nonemb_cmd: ptr to memory allocated for command
+ *
+ * return
+ *     Success: Tag number of the MBX Command issued
+ *     Failure: Error code
+ **/
 int mgmt_open_connection(struct beiscsi_hba *phba,
                         struct sockaddr *dst_addr,
                         struct beiscsi_endpoint *beiscsi_ep,
@@ -602,14 +681,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        struct phys_addr template_address = { 0, 0 };
        struct phys_addr *ptemplate_address;
        unsigned int tag = 0;
-       unsigned int i;
+       unsigned int i, ulp_num;
        unsigned short cid = beiscsi_ep->ep_cid;
        struct be_sge *sge;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
-       def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
-       def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba);
+
+       ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num;
+
+       def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num);
+       def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);
 
        ptemplate_address = &template_address;
        ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
@@ -748,11 +830,14 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
 
        rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
        if (rc) {
+               /* Check if the IOCTL needs to be re-issued */
+               if (rc == -EAGAIN)
+                       return rc;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
                            "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
 
-               rc = -EIO;
                goto free_cmd;
        }
 
@@ -861,7 +946,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
                uint32_t boot_proto)
 {
        struct be_cmd_get_def_gateway_resp gtway_addr_set;
-       struct be_cmd_get_if_info_resp if_info;
+       struct be_cmd_get_if_info_resp *if_info;
        struct be_cmd_set_dhcp_req *dhcpreq;
        struct be_cmd_rel_dhcp_req *reldhcp;
        struct be_dma_mem nonemb_cmd;
@@ -872,16 +957,17 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
        if (mgmt_get_all_if_id(phba))
                return -EIO;
 
-       memset(&if_info, 0, sizeof(if_info));
        ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
                BE2_IPV6 : BE2_IPV4 ;
 
        rc = mgmt_get_if_info(phba, ip_type, &if_info);
-       if (rc)
+       if (rc) {
+               kfree(if_info);
                return rc;
+       }
 
        if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-               if (if_info.dhcp_state) {
+               if (if_info->dhcp_state) {
                        beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
                                    "BG_%d : DHCP Already Enabled\n");
                        return 0;
@@ -894,9 +980,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
                                IP_V6_LEN : IP_V4_LEN;
 
        } else {
-               if (if_info.dhcp_state) {
+               if (if_info->dhcp_state) {
 
-                       memset(&if_info, 0, sizeof(if_info));
+                       memset(if_info, 0, sizeof(*if_info));
                        rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
                                OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
                                sizeof(*reldhcp));
@@ -919,8 +1005,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
        }
 
        /* Delete the Static IP Set */
-       if (if_info.ip_addr.addr[0]) {
-               rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL,
+       if (if_info->ip_addr.addr[0]) {
+               rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
                                           IP_ACTION_DEL);
                if (rc)
                        return rc;
@@ -966,7 +1052,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
 
                return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
        } else {
-               return mgmt_static_ip_modify(phba, &if_info, ip_param,
+               return mgmt_static_ip_modify(phba, if_info, ip_param,
                                             subnet_param, IP_ACTION_ADD);
        }
 
@@ -1031,27 +1117,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
 }
 
 int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-                    struct be_cmd_get_if_info_resp *if_info)
+                    struct be_cmd_get_if_info_resp **if_info)
 {
        struct be_cmd_get_if_info_req *req;
        struct be_dma_mem nonemb_cmd;
+       uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
        int rc;
 
        if (mgmt_get_all_if_id(phba))
                return -EIO;
 
-       rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-                                OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
-                                sizeof(*if_info));
-       if (rc)
-               return rc;
+       do {
+               rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+                                        OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+                                        ioctl_size);
+               if (rc)
+                       return rc;
 
-       req = nonemb_cmd.va;
-       req->interface_hndl = phba->interface_handle;
-       req->ip_type = ip_type;
+               req = nonemb_cmd.va;
+               req->interface_hndl = phba->interface_handle;
+               req->ip_type = ip_type;
+
+               /* Allocate memory for if_info */
+               *if_info = kzalloc(ioctl_size, GFP_KERNEL);
+               if (!*if_info) {
+                       beiscsi_log(phba, KERN_ERR,
+                                   BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+                                   "BG_%d : Memory Allocation Failure\n");
+
+                               /* Free the DMA memory for the IOCTL issuing */
+                               pci_free_consistent(phba->ctrl.pdev,
+                                                   nonemb_cmd.size,
+                                                   nonemb_cmd.va,
+                                                   nonemb_cmd.dma);
+                               return -ENOMEM;
+               }
+
+               rc =  mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
+                                          ioctl_size);
+
+               /* Check if the error is because of Insufficient_Buffer */
+               if (rc == -EAGAIN) {
+
+                       /* Get the new memory size */
+                       ioctl_size = ((struct be_cmd_resp_hdr *)
+                                     nonemb_cmd.va)->actual_resp_len;
+                       ioctl_size += sizeof(struct be_cmd_req_hdr);
 
-       return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info,
-                                   sizeof(*if_info));
+                       /* Free the previous allocated DMA memory */
+                       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                                           nonemb_cmd.va,
+                                           nonemb_cmd.dma);
+
+                       /* Free the virtual memory */
+                       kfree(*if_info);
+               } else
+                       break;
+       } while (true);
+       return rc;
 }
 
 int mgmt_get_nic_conf(struct beiscsi_hba *phba,
@@ -1281,7 +1404,7 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
 }
 
 /**
- * beiscsi_active_cid_disp()- Display Sessions Active
+ * beiscsi_active_session_disp()- Display Sessions Active
  * @dev: ptr to device not used.
  * @attr: device attribute, not used.
  * @buf: contains formatted text Session Count
@@ -1290,14 +1413,56 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
  * size of the formatted string
  **/
 ssize_t
-beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct Scsi_Host *shost = class_to_shost(dev);
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
+       uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+                       avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
+                       total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
+                       len += snprintf(buf+len, PAGE_SIZE - len,
+                                       "ULP%d : %d\n", ulp_num,
+                                       (total_cids - avlbl_cids));
+               } else
+                       len += snprintf(buf+len, PAGE_SIZE - len,
+                                       "ULP%d : %d\n", ulp_num, 0);
+       }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                      (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+       return len;
+}
+
+/**
+ * beiscsi_free_session_disp()- Display Available Sessions
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
+                      char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+       uint16_t ulp_num, len = 0;
+
+       for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+               if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
+                       len += snprintf(buf+len, PAGE_SIZE - len,
+                                       "ULP%d : %d\n", ulp_num,
+                                       BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+               else
+                       len += snprintf(buf+len, PAGE_SIZE - len,
+                                       "ULP%d : %d\n", ulp_num, 0);
+       }
+
+       return len;
 }
 
 /**
@@ -1338,6 +1503,25 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
        }
 }
 
+/**
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text port identifier
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+                       phba->fw_config.phys_port);
+}
 
 void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
                             struct wrb_handle *pwrb_handle,
@@ -1411,10 +1595,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 
        memset(pwrb, 0, sizeof(*pwrb));
 
-       AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
-                     max_burst_length, pwrb, params->dw[offsetof
-                     (struct amap_beiscsi_offload_params,
-                     max_burst_length) / 32]);
        AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
                      max_burst_length, pwrb, params->dw[offsetof
                      (struct amap_beiscsi_offload_params,
@@ -1436,7 +1616,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
                      params->dw[offsetof(struct amap_beiscsi_offload_params,
                      first_burst_length) / 32]);
        AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
-                     max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+                     max_recv_dataseg_len, pwrb,
+                     params->dw[offsetof(struct amap_beiscsi_offload_params,
+                     max_recv_data_segment_length) / 32]);
        AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
                      max_cxns, pwrb, BEISCSI_MAX_CXNS);
        AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
index 04af7e74fe4884a54a773f0471b264412de726d7..01b8c97284c0653753f6bc8085c47f799ce09f79 100644 (file)
@@ -294,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
                      struct be_cmd_get_nic_conf_resp *mac);
 
 int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-                    struct be_cmd_get_if_info_resp *if_info);
+                    struct be_cmd_get_if_info_resp **if_info);
 
 int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
                     struct be_cmd_get_def_gateway_resp *gateway);
@@ -315,12 +315,19 @@ ssize_t beiscsi_drvr_ver_disp(struct device *dev,
 ssize_t beiscsi_fw_ver_disp(struct device *dev,
                             struct device_attribute *attr, char *buf);
 
-ssize_t beiscsi_active_cid_disp(struct device *dev,
-                                struct device_attribute *attr, char *buf);
+ssize_t beiscsi_active_session_disp(struct device *dev,
+                                    struct device_attribute *attr, char *buf);
 
 ssize_t beiscsi_adap_family_disp(struct device *dev,
                                  struct device_attribute *attr, char *buf);
 
+
+ssize_t beiscsi_free_session_disp(struct device *dev,
+                                  struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_phys_port_disp(struct device *dev,
+                               struct device_attribute *attr, char *buf);
+
 void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
                             struct wrb_handle *pwrb_handle,
                             struct be_mem_descriptor *mem_descr);
index d7ca9305ff457037089e1edba2cc33b28bec1073..1ebf3fb683e6c623fc8982d53bdb77479cd37fa8 100644 (file)
@@ -64,7 +64,7 @@
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.14"
+#define BNX2FC_VERSION         "2.4.1"
 
 #define PFX                    "bnx2fc: "
 
index 69ac55495c1d7479d75f79b13fdb22659875b95c..3c204ad1d5b9ad66b055b45fecded500641207dd 100644 (file)
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Mar 08, 2013"
+#define DRV_MODULE_RELDATE     "Sep 17, 2013"
 
 
 static char version[] =
@@ -2004,6 +2004,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
                set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
 }
 
+/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
+static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
+{
+       struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+       if (interface->enabled == true) {
+               if (!ctlr->lp) {
+                       pr_err(PFX "__bnx2fc_disable: lport not found\n");
+                       return -ENODEV;
+               } else {
+                       interface->enabled = false;
+                       fcoe_ctlr_link_down(ctlr);
+                       fcoe_clean_pending_queue(ctlr->lp);
+               }
+       }
+       return 0;
+}
+
 /**
 * Deprecated: Use bnx2fc_enabled()
  */
@@ -2018,20 +2036,34 @@ static int bnx2fc_disable(struct net_device *netdev)
 
        interface = bnx2fc_interface_lookup(netdev);
        ctlr = bnx2fc_to_ctlr(interface);
-       if (!interface || !ctlr->lp) {
+
+       if (!interface) {
                rc = -ENODEV;
-               printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
+               pr_err(PFX "bnx2fc_disable: interface not found\n");
        } else {
-               interface->enabled = false;
-               fcoe_ctlr_link_down(ctlr);
-               fcoe_clean_pending_queue(ctlr->lp);
+               rc = __bnx2fc_disable(ctlr);
        }
-
        mutex_unlock(&bnx2fc_dev_lock);
        rtnl_unlock();
        return rc;
 }
 
+static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
+{
+       struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+       if (interface->enabled == false) {
+               if (!ctlr->lp) {
+                       pr_err(PFX "__bnx2fc_enable: lport not found\n");
+                       return -ENODEV;
+               } else if (!bnx2fc_link_ok(ctlr->lp)) {
+                       fcoe_ctlr_link_up(ctlr);
+                       interface->enabled = true;
+               }
+       }
+       return 0;
+}
+
 /**
  * Deprecated: Use bnx2fc_enabled()
  */
@@ -2046,12 +2078,11 @@ static int bnx2fc_enable(struct net_device *netdev)
 
        interface = bnx2fc_interface_lookup(netdev);
        ctlr = bnx2fc_to_ctlr(interface);
-       if (!interface || !ctlr->lp) {
+       if (!interface) {
                rc = -ENODEV;
-               printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
-       } else if (!bnx2fc_link_ok(ctlr->lp)) {
-               fcoe_ctlr_link_up(ctlr);
-               interface->enabled = true;
+               pr_err(PFX "bnx2fc_enable: interface not found\n");
+       } else {
+               rc = __bnx2fc_enable(ctlr);
        }
 
        mutex_unlock(&bnx2fc_dev_lock);
@@ -2072,14 +2103,12 @@ static int bnx2fc_enable(struct net_device *netdev)
 static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
 {
        struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
-       struct fc_lport *lport = ctlr->lp;
-       struct net_device *netdev = bnx2fc_netdev(lport);
 
        switch (cdev->enabled) {
        case FCOE_CTLR_ENABLED:
-               return bnx2fc_enable(netdev);
+               return __bnx2fc_enable(ctlr);
        case FCOE_CTLR_DISABLED:
-               return bnx2fc_disable(netdev);
+               return __bnx2fc_disable(ctlr);
        case FCOE_CTLR_UNUSED:
        default:
                return -ENOTSUPP;
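
(Editorial illustration, not part of the patch: the refactor above pulls the real work into __bnx2fc_enable()/__bnx2fc_disable(), which assume rtnl and the bnx2fc_dev_lock are already held, so both the netdev path and the fcoe_ctlr sysfs path share one implementation instead of the sysfs path re-resolving the netdev. Below is a generic sketch of that "locked wrapper around a __helper" convention using pthreads; all names are invented for the example.)

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

struct iface { bool enabled; };

/* Core helper: assumes dev_lock is already held by the caller. */
static int __iface_enable(struct iface *ifp)
{
        if (!ifp->enabled) {
                ifp->enabled = true;
                puts("link brought up");
        }
        return 0;
}

/* Public wrapper: takes the lock, then delegates to the __helper. */
static int iface_enable(struct iface *ifp)
{
        int rc;

        pthread_mutex_lock(&dev_lock);
        rc = __iface_enable(ifp);
        pthread_mutex_unlock(&dev_lock);
        return rc;
}

/* A second entry point (e.g. a sysfs handler) that already holds the
 * lock calls __iface_enable() directly and avoids double locking. */
int main(void)
{
        struct iface eth = { .enabled = false };

        iface_enable(&eth);
        printf("enabled = %d\n", eth.enabled);
        return 0;
}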
index 575142e92d9c5219b10fb286d23a12ee1580db58..ed880891cb7c4b7c038ee73aac032b77108377d4 100644 (file)
@@ -1246,6 +1246,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
                        kref_put(&io_req->refcount,
                                 bnx2fc_cmd_release); /* drop timer hold */
                rc = bnx2fc_expl_logo(lport, io_req);
+               /* This only occurs when a task abort was requested while ABTS
+                  is in progress.  Setting the IO_CLEANUP flag will skip the
+                  RRQ process in the case when the fw generated SCSI_CMD cmpl
+                  resulted from the ABTS request rather than the CLEANUP
+                  request */
+               set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
                goto out;
        }
 
index 69c915aa77c24737e17680805b67d4507df59c4c..4b9cf93f3fb6274b7066c75d170b71a8f3bb5b2e 100644 (file)
@@ -786,6 +786,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
        {"IBM", "1742"},
        {"IBM", "1745"},
        {"IBM", "1746"},
+       {"IBM", "1813"},
        {"IBM", "1814"},
        {"IBM", "1815"},
        {"IBM", "1818"},
index 0838e265e0b9543899030498435509b597f29b03..3fd305d6b67d893f77e299b49269e5e7035ca43f 100644 (file)
@@ -799,47 +799,47 @@ struct esas2r_adapter {
        struct esas2r_target *targetdb_end;
        unsigned char *regs;
        unsigned char *data_window;
-       u32 volatile flags;
-       #define AF_PORT_CHANGE      (u32)(0x00000001)
-       #define AF_CHPRST_NEEDED    (u32)(0x00000004)
-       #define AF_CHPRST_PENDING   (u32)(0x00000008)
-       #define AF_CHPRST_DETECTED  (u32)(0x00000010)
-       #define AF_BUSRST_NEEDED    (u32)(0x00000020)
-       #define AF_BUSRST_PENDING   (u32)(0x00000040)
-       #define AF_BUSRST_DETECTED  (u32)(0x00000080)
-       #define AF_DISABLED         (u32)(0x00000100)
-       #define AF_FLASH_LOCK       (u32)(0x00000200)
-       #define AF_OS_RESET         (u32)(0x00002000)
-       #define AF_FLASHING         (u32)(0x00004000)
-       #define AF_POWER_MGT        (u32)(0x00008000)
-       #define AF_NVR_VALID        (u32)(0x00010000)
-       #define AF_DEGRADED_MODE    (u32)(0x00020000)
-       #define AF_DISC_PENDING     (u32)(0x00040000)
-       #define AF_TASKLET_SCHEDULED    (u32)(0x00080000)
-       #define AF_HEARTBEAT        (u32)(0x00200000)
-       #define AF_HEARTBEAT_ENB    (u32)(0x00400000)
-       #define AF_NOT_PRESENT      (u32)(0x00800000)
-       #define AF_CHPRST_STARTED   (u32)(0x01000000)
-       #define AF_FIRST_INIT       (u32)(0x02000000)
-       #define AF_POWER_DOWN       (u32)(0x04000000)
-       #define AF_DISC_IN_PROG     (u32)(0x08000000)
-       #define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
-       #define AF_LEGACY_SGE_MODE  (u32)(0x20000000)
-       #define AF_DISC_POLLED      (u32)(0x40000000)
-       u32 volatile flags2;
-       #define AF2_SERIAL_FLASH    (u32)(0x00000001)
-       #define AF2_DEV_SCAN        (u32)(0x00000002)
-       #define AF2_DEV_CNT_OK      (u32)(0x00000004)
-       #define AF2_COREDUMP_AVAIL  (u32)(0x00000008)
-       #define AF2_COREDUMP_SAVED  (u32)(0x00000010)
-       #define AF2_VDA_POWER_DOWN  (u32)(0x00000100)
-       #define AF2_THUNDERLINK     (u32)(0x00000200)
-       #define AF2_THUNDERBOLT     (u32)(0x00000400)
-       #define AF2_INIT_DONE       (u32)(0x00000800)
-       #define AF2_INT_PENDING     (u32)(0x00001000)
-       #define AF2_TIMER_TICK      (u32)(0x00002000)
-       #define AF2_IRQ_CLAIMED     (u32)(0x00004000)
-       #define AF2_MSI_ENABLED     (u32)(0x00008000)
+       long flags;
+       #define AF_PORT_CHANGE      0
+       #define AF_CHPRST_NEEDED    1
+       #define AF_CHPRST_PENDING   2
+       #define AF_CHPRST_DETECTED  3
+       #define AF_BUSRST_NEEDED    4
+       #define AF_BUSRST_PENDING   5
+       #define AF_BUSRST_DETECTED  6
+       #define AF_DISABLED         7
+       #define AF_FLASH_LOCK       8
+       #define AF_OS_RESET         9
+       #define AF_FLASHING         10
+       #define AF_POWER_MGT        11
+       #define AF_NVR_VALID        12
+       #define AF_DEGRADED_MODE    13
+       #define AF_DISC_PENDING     14
+       #define AF_TASKLET_SCHEDULED    15
+       #define AF_HEARTBEAT        16
+       #define AF_HEARTBEAT_ENB    17
+       #define AF_NOT_PRESENT      18
+       #define AF_CHPRST_STARTED   19
+       #define AF_FIRST_INIT       20
+       #define AF_POWER_DOWN       21
+       #define AF_DISC_IN_PROG     22
+       #define AF_COMM_LIST_TOGGLE 23
+       #define AF_LEGACY_SGE_MODE  24
+       #define AF_DISC_POLLED      25
+       long flags2;
+       #define AF2_SERIAL_FLASH    0
+       #define AF2_DEV_SCAN        1
+       #define AF2_DEV_CNT_OK      2
+       #define AF2_COREDUMP_AVAIL  3
+       #define AF2_COREDUMP_SAVED  4
+       #define AF2_VDA_POWER_DOWN  5
+       #define AF2_THUNDERLINK     6
+       #define AF2_THUNDERBOLT     7
+       #define AF2_INIT_DONE       8
+       #define AF2_INT_PENDING     9
+       #define AF2_TIMER_TICK      10
+       #define AF2_IRQ_CLAIMED     11
+       #define AF2_MSI_ENABLED     12
        atomic_t disable_cnt;
        atomic_t dis_ints_cnt;
        u32 int_stat;
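
The hunk above is the heart of the esas2r series: the driver-private u32 flag masks become plain bit numbers so the rest of the code can use the kernel's atomic bitops (set_bit(), clear_bit(), test_bit(), test_and_set_bit()) on a long-sized bitmap instead of the old esas2r_lock_set_flags()/esas2r_lock_clear_flags() wrappers. A minimal userspace analogue of the pattern, with non-atomic stand-ins for the kernel helpers (illustrative only; the real primitives are atomic read-modify-write operations):

        #include <stdio.h>

        /* flag *bit numbers*, as in the new header, rather than masks */
        #define AF_CHPRST_PENDING   2
        #define AF_DEGRADED_MODE   13

        /* non-atomic stand-ins for set_bit()/clear_bit()/test_bit() */
        static void set_bit_l(int nr, unsigned long *addr)   { *addr |=  1UL << nr; }
        static void clear_bit_l(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
        static int  test_bit_l(int nr, const unsigned long *addr)
        {
                return (int)((*addr >> nr) & 1UL);
        }

        int main(void)
        {
                unsigned long flags = 0;

                set_bit_l(AF_CHPRST_PENDING, &flags);
                if (test_bit_l(AF_CHPRST_PENDING, &flags) ||
                    test_bit_l(AF_DEGRADED_MODE, &flags))
                        printf("reset or degraded mode pending\n");
                clear_bit_l(AF_CHPRST_PENDING, &flags);
                return 0;
        }
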
@@ -1150,16 +1150,6 @@ void esas2r_queue_fw_event(struct esas2r_adapter *a,
                           int data_sz);
 
 /* Inline functions */
-static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
-{
-       return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
-}
-
-static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
-{
-       return test_and_clear_bit(ilog2(bits),
-                                 (volatile unsigned long *)flags);
-}
 
 /* Allocate a chip scatter/gather list entry */
 static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
@@ -1217,7 +1207,6 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
                                          struct esas2r_adapter *a)
 {
        union atto_vda_req *vrq = rq->vrq;
-       u32 handle;
 
        INIT_LIST_HEAD(&rq->sg_table_head);
        rq->data_buf = (void *)(vrq + 1);
@@ -1253,11 +1242,9 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
 
        /*
         * add a reference number to the handle to make it unique (until it
-        * wraps of course) while preserving the upper word
+        * wraps of course) while preserving the least significant word
         */
-
-       handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
-       vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
+       vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
 
        /*
         * the following formats a SCSI request.  the caller can override as
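
The handle rework above drops the be32 round-trip in favour of plain integer arithmetic: the rolling command reference number now occupies the upper 16 bits of the handle while the lower 16 bits of the existing value are preserved, which is what the updated comment means by "least significant word". A quick sketch of that arithmetic with made-up values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t handle     = 0x12340007; /* illustrative existing handle */
                uint16_t cmd_ref_no = 0x00a5;     /* illustrative rolling counter */

                /* new reference number in the upper word, low word preserved */
                handle = ((uint32_t)cmd_ref_no << 16) | (uint16_t)handle;

                printf("handle = 0x%08x\n", (unsigned)handle); /* 0x00a50007 */
                return 0;
        }
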
@@ -1303,10 +1290,13 @@ static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
 
 static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
 {
-       return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
-                           | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
-                           | AF_PORT_CHANGE))
-              ? true : false;
+
+       return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
+              test_bit(AF_BUSRST_DETECTED, &a->flags) ||
+              test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+              test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+              test_bit(AF_PORT_CHANGE, &a->flags);
+
 }
 
 /*
@@ -1345,24 +1335,24 @@ static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
 static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
 {
        /* make sure we don't schedule twice */
-       if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
-             ilog2(AF_TASKLET_SCHEDULED)))
+       if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
                tasklet_hi_schedule(&a->tasklet);
 }
 
 static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
 {
-       if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
-           && (a->nvram->options2 & SASNVR2_HEARTBEAT))
-               esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
+       if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+           !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+           (a->nvram->options2 & SASNVR2_HEARTBEAT))
+               set_bit(AF_HEARTBEAT_ENB, &a->flags);
        else
-               esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+               clear_bit(AF_HEARTBEAT_ENB, &a->flags);
 }
 
 static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
 {
-       esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
-       esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+       clear_bit(AF_HEARTBEAT_ENB, &a->flags);
+       clear_bit(AF_HEARTBEAT, &a->flags);
 }
 
 /* Set the initial state for resetting the adapter on the next pass through
@@ -1372,9 +1362,9 @@ static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
 {
        esas2r_disable_heartbeat(a);
 
-       esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
-       esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
-       esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+       set_bit(AF_CHPRST_NEEDED, &a->flags);
+       set_bit(AF_CHPRST_PENDING, &a->flags);
+       set_bit(AF_DISC_PENDING, &a->flags);
 }
 
 /* See if an interrupt is pending on the adapter. */
index dec6c334ce3e74235e8df6b7acb41d631bbbd8f9..1c079f4300a56a4d42ab3e5564dc9460b7704675 100644 (file)
@@ -86,9 +86,9 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)
 
        esas2r_trace_enter();
 
-       esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
-       esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
-       esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
+       clear_bit(AF_DISC_IN_PROG, &a->flags);
+       clear_bit(AF2_DEV_SCAN, &a->flags2);
+       clear_bit(AF2_DEV_CNT_OK, &a->flags2);
 
        a->disc_start_time = jiffies_to_msecs(jiffies);
        a->disc_wait_time = nvr->dev_wait_time * 1000;
@@ -107,7 +107,8 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)
 
        a->general_req.interrupt_cx = NULL;
 
-       if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
+       if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
+           test_bit(AF_POWER_MGT, &a->flags)) {
                if (a->prev_dev_cnt == 0) {
                        /* Don't bother waiting if there is nothing to wait
                         * for.
@@ -212,9 +213,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
                        || a->disc_wait_cnt == 0)) {
                        /* After three seconds of waiting, schedule a scan. */
                        if (time >= 3000
-                           && !(esas2r_lock_set_flags(&a->flags2,
-                                                      AF2_DEV_SCAN) &
-                                ilog2(AF2_DEV_SCAN))) {
+                           && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                                spin_lock_irqsave(&a->mem_lock, flags);
                                esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                                spin_unlock_irqrestore(&a->mem_lock, flags);
@@ -228,18 +227,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
                 * We are done waiting...we think.  Adjust the wait time to
                 * consume events after the count is met.
                 */
-               if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
-                     & ilog2(AF2_DEV_CNT_OK)))
+               if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
                        a->disc_wait_time = time + 3000;
 
                /* If we haven't done a full scan yet, do it now. */
-               if (!(esas2r_lock_set_flags(&a->flags2,
-                                           AF2_DEV_SCAN) &
-                     ilog2(AF2_DEV_SCAN))) {
+               if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
-
                        esas2r_trace_exit();
                        return;
                }
@@ -253,9 +248,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
                        return;
                }
        } else {
-               if (!(esas2r_lock_set_flags(&a->flags2,
-                                           AF2_DEV_SCAN) &
-                     ilog2(AF2_DEV_SCAN))) {
+               if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
@@ -265,8 +258,8 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
        /* We want to stop waiting for devices. */
        a->disc_wait_time = 0;
 
-       if ((a->flags & AF_DISC_POLLED)
-           && (a->flags & AF_DISC_IN_PROG)) {
+       if (test_bit(AF_DISC_POLLED, &a->flags) &&
+           test_bit(AF_DISC_IN_PROG, &a->flags)) {
                /*
                 * Polled discovery is still pending so continue the active
                 * discovery until it is done.  At that point, we will stop
@@ -280,14 +273,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
                 * driven; i.e. There is no transition.
                 */
                esas2r_disc_fix_curr_requests(a);
-               esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+               clear_bit(AF_DISC_PENDING, &a->flags);
 
                /*
                 * We have deferred target state changes until now because we
                 * don't want to report any removals (due to the first arrival)
                 * until the device wait time expires.
                 */
-               esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+               set_bit(AF_PORT_CHANGE, &a->flags);
        }
 
        esas2r_trace_exit();
@@ -308,7 +301,8 @@ void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
         * Don't start discovery before or during polled discovery.  if we did,
         * we would have a deadlock if we are in the ISR already.
         */
-       if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
+       if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+           !test_bit(AF_DISC_POLLED, &a->flags))
                esas2r_disc_start_port(a);
 
        esas2r_trace_exit();
@@ -322,7 +316,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
 
        esas2r_trace_enter();
 
-       if (a->flags & AF_DISC_IN_PROG) {
+       if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
                esas2r_trace_exit();
 
                return false;
@@ -330,7 +324,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
 
        /* If there is a discovery waiting, process it. */
        if (dc->disc_evt) {
-               if ((a->flags & AF_DISC_POLLED)
+               if (test_bit(AF_DISC_POLLED, &a->flags)
                    && a->disc_wait_time == 0) {
                        /*
                         * We are doing polled discovery, but we no longer want
@@ -347,7 +341,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
 
                esas2r_hdebug("disc done");
 
-               esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+               set_bit(AF_PORT_CHANGE, &a->flags);
 
                esas2r_trace_exit();
 
@@ -356,10 +350,10 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
 
        /* Handle the discovery context */
        esas2r_trace("disc_evt: %d", dc->disc_evt);
-       esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
+       set_bit(AF_DISC_IN_PROG, &a->flags);
        dc->flags = 0;
 
-       if (a->flags & AF_DISC_POLLED)
+       if (test_bit(AF_DISC_POLLED, &a->flags))
                dc->flags |= DCF_POLLED;
 
        rq->interrupt_cx = dc;
@@ -379,7 +373,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
        }
 
        /* Continue interrupt driven discovery */
-       if (!(a->flags & AF_DISC_POLLED))
+       if (!test_bit(AF_DISC_POLLED, &a->flags))
                ret = esas2r_disc_continue(a, rq);
        else
                ret = true;
@@ -453,10 +447,10 @@ static bool esas2r_disc_continue(struct esas2r_adapter *a,
        /* Discovery is done...for now. */
        rq->interrupt_cx = NULL;
 
-       if (!(a->flags & AF_DISC_PENDING))
+       if (!test_bit(AF_DISC_PENDING, &a->flags))
                esas2r_disc_fix_curr_requests(a);
 
-       esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+       clear_bit(AF_DISC_IN_PROG, &a->flags);
 
        /* Start the next discovery. */
        return esas2r_disc_start_port(a);
@@ -480,7 +474,8 @@ static bool esas2r_disc_start_request(struct esas2r_adapter *a,
 
        spin_lock_irqsave(&a->queue_lock, flags);
 
-       if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
+       if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
+           !test_bit(AF_FLASHING, &a->flags))
                esas2r_disc_local_start_request(a, rq);
        else
                list_add_tail(&rq->req_list, &a->defer_list);
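
esas2r_disc_start_request() above (and esas2r_start_request() later in this diff) follows one queueing rule: while a chip reset or flash operation is pending, new requests are parked on the defer list rather than started, and the deferred-processing path replays them once the condition clears. A small sketch of that defer-or-start decision, with illustrative types that only stand in for the driver's structures:

        #include <stdbool.h>
        #include <stdio.h>

        struct adapter {
                bool chprst_pending;    /* stands in for AF_CHPRST_PENDING */
                bool flashing;          /* stands in for AF_FLASHING */
                int  deferred;          /* stands in for the defer_list */
        };

        /* start the request now, or park it until the adapter is usable again */
        static void start_or_defer(struct adapter *a)
        {
                if (!a->chprst_pending && !a->flashing)
                        printf("request started\n");
                else
                        a->deferred++;
        }

        int main(void)
        {
                struct adapter a = { .chprst_pending = true };

                start_or_defer(&a);
                printf("%d request(s) deferred\n", a.deferred);
                return 0;
        }
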
index 2ec3c23275b833b63465da412ffff29e20496533..b7dc59fca7a633c950d9760a82266a9feea376da 100644 (file)
@@ -231,7 +231,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
         * RS_PENDING, FM API tasks will continue.
         */
        rq->req_stat = RS_PENDING;
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                /* not supported for now */;
        else
                build_flash_msg(a, rq);
@@ -315,7 +315,7 @@ static bool complete_fmapi_req(struct esas2r_adapter *a,
                memset(fc->scratch, 0, FM_BUF_SZ);
 
        esas2r_enable_heartbeat(a);
-       esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
+       clear_bit(AF_FLASH_LOCK, &a->flags);
        return false;
 }
 
@@ -526,7 +526,7 @@ no_cfg:
                         * The download is complete.  If in degraded mode,
                         * attempt a chip reset.
                         */
-                       if (a->flags & AF_DEGRADED_MODE)
+                       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                                esas2r_local_reset_adapter(a);
 
                        a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
@@ -890,7 +890,7 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
                }
        }
 
-       if (a->flags & AF_DEGRADED_MODE) {
+       if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
                fs->status = ATTO_STS_DEGRADED;
                return false;
        }
@@ -945,8 +945,12 @@ static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
 
        /* Now wait for the firmware to process it */
        starttime = jiffies_to_msecs(jiffies);
-       timeout = a->flags &
-                 (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
+
+       if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+           test_bit(AF_DISC_PENDING, &a->flags))
+               timeout = 40000;
+       else
+               timeout = 5000;
 
        while (true) {
                intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
@@ -1008,7 +1012,7 @@ bool esas2r_read_flash_block(struct esas2r_adapter *a,
                u32 offset;
                u32 iatvr;
 
-               if (a->flags2 & AF2_SERIAL_FLASH)
+               if (test_bit(AF2_SERIAL_FLASH, &a->flags2))
                        iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
                else
                        iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
@@ -1236,9 +1240,9 @@ static void esas2r_nvram_callback(struct esas2r_adapter *a,
        if (rq->req_stat != RS_PENDING) {
                /* update the NVRAM state */
                if (rq->req_stat == RS_SUCCESS)
-                       esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+                       set_bit(AF_NVR_VALID, &a->flags);
                else
-                       esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+                       clear_bit(AF_NVR_VALID, &a->flags);
 
                esas2r_enable_heartbeat(a);
 
@@ -1258,7 +1262,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
        u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
        struct atto_vda_flash_req *vrq = &rq->vrq->flash;
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return false;
 
        if (down_interruptible(&a->nvram_semaphore))
@@ -1302,7 +1306,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
                               FLS_OFFSET_NVR,
                               sizeof(struct esas2r_sas_nvram));
 
-       if (a->flags & AF_LEGACY_SGE_MODE) {
+       if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
 
                vrq->data.sge[0].length =
                        cpu_to_le32(SGE_LAST |
@@ -1337,7 +1341,7 @@ bool esas2r_nvram_validate(struct esas2r_adapter *a)
        } else if (n->version > SASNVR_VERSION) {
                esas2r_hdebug("invalid NVRAM version");
        } else {
-               esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+               set_bit(AF_NVR_VALID, &a->flags);
                rslt = true;
        }
 
@@ -1359,7 +1363,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
        struct esas2r_sas_nvram *n = a->nvram;
        u32 time = jiffies_to_msecs(jiffies);
 
-       esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+       clear_bit(AF_NVR_VALID, &a->flags);
        *n = default_sas_nvram;
        n->sas_addr[3] |= 0x0F;
        n->sas_addr[4] = HIBYTE(LOWORD(time));
@@ -1389,7 +1393,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
        u8 j;
        struct esas2r_component_header *ch;
 
-       if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
+       if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {
                /* flag was already set */
                fi->status = FI_STAT_BUSY;
                return false;
@@ -1413,7 +1417,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
                return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
        }
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
 
        switch (fi->action) {
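
esas2r_fm_api() above uses test_and_set_bit(AF_FLASH_LOCK, ...) as a simple try-lock: if the bit was already set, another FM API request owns the flash and the call fails with FI_STAT_BUSY; complete_fmapi_req() (earlier hunk) clears the bit when the operation finishes. A non-atomic userspace sketch of that try-lock shape (illustrative only; the kernel helper performs the test and set atomically):

        #include <stdio.h>

        #define AF_FLASH_LOCK 8

        /* stand-in for test_and_set_bit(): returns the previous bit value */
        static int test_and_set_bit_l(int nr, unsigned long *addr)
        {
                int old = (int)((*addr >> nr) & 1UL);

                *addr |= 1UL << nr;
                return old;
        }

        static int fm_api(unsigned long *flags)
        {
                if (test_and_set_bit_l(AF_FLASH_LOCK, flags))
                        return -1;                      /* already locked: busy */

                printf("flash operation runs\n");
                *flags &= ~(1UL << AF_FLASH_LOCK);      /* clear_bit() on completion */
                return 0;
        }

        int main(void)
        {
                unsigned long flags = 0;

                if (fm_api(&flags) == 0)
                        printf("flash lock released\n");
                return 0;
        }
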
index da1869df24088a2c2647027033ecd8d3e171c130..b9750e296d7116ab38f91017a82eb98279c3ed1e 100644 (file)
@@ -216,7 +216,7 @@ use_legacy_interrupts:
                        goto use_legacy_interrupts;
                }
                a->intr_mode = INTR_MODE_MSI;
-               esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
+               set_bit(AF2_MSI_ENABLED, &a->flags2);
                break;
 
 
@@ -252,7 +252,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
                return;
        }
 
-       esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
+       set_bit(AF2_IRQ_CLAIMED, &a->flags2);
        esas2r_log(ESAS2R_LOG_INFO,
                   "claimed IRQ %d flags: 0x%lx",
                   a->pcid->irq, flags);
@@ -380,10 +380,10 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
        /* interrupts will be disabled until we are done with init */
        atomic_inc(&a->dis_ints_cnt);
        atomic_inc(&a->disable_cnt);
-       a->flags |= AF_CHPRST_PENDING
-                   | AF_DISC_PENDING
-                   | AF_FIRST_INIT
-                   | AF_LEGACY_SGE_MODE;
+       set_bit(AF_CHPRST_PENDING, &a->flags);
+       set_bit(AF_DISC_PENDING, &a->flags);
+       set_bit(AF_FIRST_INIT, &a->flags);
+       set_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
        a->init_msg = ESAS2R_INIT_MSG_START;
        a->max_vdareq_size = 128;
@@ -440,11 +440,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
 
        esas2r_claim_interrupts(a);
 
-       if (a->flags2 & AF2_IRQ_CLAIMED)
+       if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
                esas2r_enable_chip_interrupts(a);
 
-       esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
-       if (!(a->flags & AF_DEGRADED_MODE))
+       set_bit(AF2_INIT_DONE, &a->flags2);
+       if (!test_bit(AF_DEGRADED_MODE, &a->flags))
                esas2r_kickoff_timer(a);
        esas2r_debug("esas2r_init_adapter done for %p (%d)",
                     a, a->disable_cnt);
@@ -457,8 +457,8 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
 {
        struct esas2r_mem_desc *memdesc, *next;
 
-       if ((a->flags2 & AF2_INIT_DONE)
-           &&  (!(a->flags & AF_DEGRADED_MODE))) {
+       if ((test_bit(AF2_INIT_DONE, &a->flags2))
+           &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
                if (!power_management) {
                        del_timer_sync(&a->timer);
                        tasklet_kill(&a->tasklet);
@@ -508,19 +508,19 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
        }
 
        /* Clean up interrupts */
-       if (a->flags2 & AF2_IRQ_CLAIMED) {
+       if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
                esas2r_log_dev(ESAS2R_LOG_INFO,
                               &(a->pcid->dev),
                               "free_irq(%d) called", a->pcid->irq);
 
                free_irq(a->pcid->irq, a);
                esas2r_debug("IRQ released");
-               esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
+               clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
        }
 
-       if (a->flags2 & AF2_MSI_ENABLED) {
+       if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
                pci_disable_msi(a->pcid);
-               esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
+               clear_bit(AF2_MSI_ENABLED, &a->flags2);
                esas2r_debug("MSI disabled");
        }
 
@@ -641,12 +641,10 @@ void esas2r_kill_adapter(int i)
                pci_set_drvdata(a->pcid, NULL);
                esas2r_adapters[i] = NULL;
 
-               if (a->flags2 & AF2_INIT_DONE) {
-                       esas2r_lock_clear_flags(&a->flags2,
-                                               AF2_INIT_DONE);
+               if (test_bit(AF2_INIT_DONE, &a->flags2)) {
+                       clear_bit(AF2_INIT_DONE, &a->flags2);
 
-                       esas2r_lock_set_flags(&a->flags,
-                                             AF_DEGRADED_MODE);
+                       set_bit(AF_DEGRADED_MODE, &a->flags);
 
                        esas2r_log_dev(ESAS2R_LOG_INFO,
                                       &(a->host->shost_gendev),
@@ -759,7 +757,7 @@ int esas2r_resume(struct pci_dev *pdev)
 
        esas2r_claim_interrupts(a);
 
-       if (a->flags2 & AF2_IRQ_CLAIMED) {
+       if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
                /*
                 * Now that system interrupt(s) are claimed, we can enable
                 * chip interrupts.
@@ -781,7 +779,7 @@ error_exit:
 
 bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
 {
-       esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+       set_bit(AF_DEGRADED_MODE, &a->flags);
        esas2r_log(ESAS2R_LOG_CRIT,
                   "setting adapter to degraded mode: %s\n", error_str);
        return false;
@@ -809,7 +807,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
        int pcie_cap_reg;
 
        pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
-       if (0xffff & pcie_cap_reg) {
+       if (pcie_cap_reg) {
                u16 devcontrol;
 
                pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@@ -896,7 +894,7 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
            && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
                a->flags2 |= AF2_THUNDERBOLT;
 
-       if (a->flags2 & AF2_THUNDERBOLT)
+       if (test_bit(AF2_THUNDERBOLT, &a->flags2))
                a->flags2 |= AF2_SERIAL_FLASH;
 
        if (a->pcid->subsystem_device == ATTO_TLSH_1068)
@@ -956,14 +954,14 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
        a->outbound_copy = (u32 volatile *)high;
        high += sizeof(u32);
 
-       if (!(a->flags & AF_NVR_VALID))
+       if (!test_bit(AF_NVR_VALID, &a->flags))
                esas2r_nvram_set_defaults(a);
 
        /* update the caller's uncached memory area pointer */
        *uncached_area = (void *)high;
 
        /* initialize the allocated memory */
-       if (a->flags & AF_FIRST_INIT) {
+       if (test_bit(AF_FIRST_INIT, &a->flags)) {
                memset(a->req_table, 0,
                       (num_requests + num_ae_requests +
                        1) * sizeof(struct esas2r_request *));
@@ -1019,7 +1017,7 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)
         * if the chip reset detected flag is set, we can bypass a bunch of
         * stuff.
         */
-       if (a->flags & AF_CHPRST_DETECTED)
+       if (test_bit(AF_CHPRST_DETECTED, &a->flags))
                goto skip_chip_reset;
 
        /*
@@ -1057,14 +1055,12 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)
                                                    doorbell);
 
                        if (ver == DRBL_FW_VER_0) {
-                               esas2r_lock_set_flags(&a->flags,
-                                                     AF_LEGACY_SGE_MODE);
+                               set_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
                                a->max_vdareq_size = 128;
                                a->build_sgl = esas2r_build_sg_list_sge;
                        } else if (ver == DRBL_FW_VER_1) {
-                               esas2r_lock_clear_flags(&a->flags,
-                                                       AF_LEGACY_SGE_MODE);
+                               clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
                                a->max_vdareq_size = 1024;
                                a->build_sgl = esas2r_build_sg_list_prd;
@@ -1139,7 +1135,7 @@ skip_chip_reset:
        *a->outbound_copy =
                a->last_write =
                        a->last_read = a->list_size - 1;
-       esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+       set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
        esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
                                    a->last_write);
        esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
@@ -1204,9 +1200,9 @@ skip_chip_reset:
         */
        doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
        if (doorbell & DRBL_POWER_DOWN)
-               esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+               set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
        else
-               esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+               clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
 
        /*
         * enable assertion of outbound queue and doorbell interrupts in the
@@ -1239,8 +1235,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
                                     0,
                                     NULL);
                ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
-               ci->sgl_page_size = sgl_page_size;
-               ci->epoch_time = now.tv_sec;
+               ci->sgl_page_size = cpu_to_le32(sgl_page_size);
+               ci->epoch_time = cpu_to_le32(now.tv_sec);
                rq->flags |= RF_FAILURE_OK;
                a->init_msg = ESAS2R_INIT_MSG_INIT;
                break;
@@ -1250,12 +1246,15 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
                if (rq->req_stat == RS_SUCCESS) {
                        u32 major;
                        u32 minor;
+                       u16 fw_release;
 
                        a->fw_version = le16_to_cpu(
                                rq->func_rsp.cfg_rsp.vda_version);
                        a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
-                       major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
-                       minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
+                       fw_release = le16_to_cpu(
+                               rq->func_rsp.cfg_rsp.fw_release);
+                       major = LOBYTE(fw_release);
+                       minor = HIBYTE(fw_release);
                        a->fw_version += (major << 16) + (minor << 24);
                } else {
                        esas2r_hdebug("FAILED");
@@ -1266,9 +1265,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
                 * unsupported config requests correctly.
                 */
 
-               if ((a->flags2 & AF2_THUNDERBOLT)
-                   || (be32_to_cpu(a->fw_version) >
-                       be32_to_cpu(0x47020052))) {
+               if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
+                   || (be32_to_cpu(a->fw_version) > 0x00524702)) {
                        esas2r_hdebug("CFG get init");
                        esas2r_build_cfg_req(a,
                                             rq,
@@ -1361,10 +1359,10 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
        struct esas2r_request *rq;
        u32 i;
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                goto exit;
 
-       if (!(a->flags & AF_NVR_VALID)) {
+       if (!test_bit(AF_NVR_VALID, &a->flags)) {
                if (!esas2r_nvram_read_direct(a))
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "invalid/missing NVRAM parameters");
@@ -1376,8 +1374,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
        }
 
        /* The firmware is ready. */
-       esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
-       esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+       clear_bit(AF_DEGRADED_MODE, &a->flags);
+       clear_bit(AF_CHPRST_PENDING, &a->flags);
 
        /* Post all the async event requests */
        for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
@@ -1398,8 +1396,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
 
        esas2r_hdebug("firmware revision: %s", a->fw_rev);
 
-       if ((a->flags & AF_CHPRST_DETECTED)
-           && (a->flags & AF_FIRST_INIT)) {
+       if (test_bit(AF_CHPRST_DETECTED, &a->flags)
+           && (test_bit(AF_FIRST_INIT, &a->flags))) {
                esas2r_enable_chip_interrupts(a);
                return true;
        }
@@ -1423,18 +1421,18 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
                 * Block Tasklets from getting scheduled and indicate this is
                 * polled discovery.
                 */
-               esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
-               esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
+               set_bit(AF_TASKLET_SCHEDULED, &a->flags);
+               set_bit(AF_DISC_POLLED, &a->flags);
 
                /*
                 * Temporarily bring the disable count to zero to enable
                 * deferred processing.  Note that the count is already zero
                 * after the first initialization.
                 */
-               if (a->flags & AF_FIRST_INIT)
+               if (test_bit(AF_FIRST_INIT, &a->flags))
                        atomic_dec(&a->disable_cnt);
 
-               while (a->flags & AF_DISC_PENDING) {
+               while (test_bit(AF_DISC_PENDING, &a->flags)) {
                        schedule_timeout_interruptible(msecs_to_jiffies(100));
 
                        /*
@@ -1453,7 +1451,7 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
                         * we have to make sure the timer tick processes the
                         * doorbell indicating the firmware is ready.
                         */
-                       if (!(a->flags & AF_CHPRST_PENDING))
+                       if (!test_bit(AF_CHPRST_PENDING, &a->flags))
                                esas2r_disc_check_for_work(a);
 
                        /* Simulate a timer tick. */
@@ -1473,11 +1471,11 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
 
                }
 
-               if (a->flags & AF_FIRST_INIT)
+               if (test_bit(AF_FIRST_INIT, &a->flags))
                        atomic_inc(&a->disable_cnt);
 
-               esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
-               esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+               clear_bit(AF_DISC_POLLED, &a->flags);
+               clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
        }
 
 
@@ -1504,26 +1502,26 @@ exit:
         * need to get done before we exit.
         */
 
-       if ((a->flags & AF_CHPRST_DETECTED)
-           && (a->flags & AF_FIRST_INIT)) {
+       if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
+           test_bit(AF_FIRST_INIT, &a->flags)) {
                /*
                 * Reinitialization was performed during the first
                 * initialization.  Only clear the chip reset flag so the
                 * original device polling is not cancelled.
                 */
                if (!rslt)
-                       esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+                       clear_bit(AF_CHPRST_PENDING, &a->flags);
        } else {
                /* First initialization or a subsequent re-init is complete. */
                if (!rslt) {
-                       esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
-                       esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+                       clear_bit(AF_CHPRST_PENDING, &a->flags);
+                       clear_bit(AF_DISC_PENDING, &a->flags);
                }
 
 
                /* Enable deferred processing after the first initialization. */
-               if (a->flags & AF_FIRST_INIT) {
-                       esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
+               if (test_bit(AF_FIRST_INIT, &a->flags)) {
+                       clear_bit(AF_FIRST_INIT, &a->flags);
 
                        if (atomic_dec_return(&a->disable_cnt) == 0)
                                esas2r_do_deferred_processes(a);
@@ -1535,7 +1533,7 @@ exit:
 
 void esas2r_reset_adapter(struct esas2r_adapter *a)
 {
-       esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+       set_bit(AF_OS_RESET, &a->flags);
        esas2r_local_reset_adapter(a);
        esas2r_schedule_tasklet(a);
 }
@@ -1550,17 +1548,17 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
         * dump is located in the upper 512KB of the onchip SRAM.  Make sure
         * to not overwrite a previous crash that was saved.
         */
-       if ((a->flags2 & AF2_COREDUMP_AVAIL)
-           && !(a->flags2 & AF2_COREDUMP_SAVED)) {
+       if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
+           !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
                esas2r_read_mem_block(a,
                                      a->fw_coredump_buff,
                                      MW_DATA_ADDR_SRAM + 0x80000,
                                      ESAS2R_FWCOREDUMP_SZ);
 
-               esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
+               set_bit(AF2_COREDUMP_SAVED, &a->flags2);
        }
 
-       esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+       clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
 
        /* Reset the chip */
        if (a->pcid->revision == MVR_FREY_B2)
@@ -1606,10 +1604,10 @@ static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
  */
 void esas2r_power_down(struct esas2r_adapter *a)
 {
-       esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
-       esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
+       set_bit(AF_POWER_MGT, &a->flags);
+       set_bit(AF_POWER_DOWN, &a->flags);
 
-       if (!(a->flags & AF_DEGRADED_MODE)) {
+       if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
                u32 starttime;
                u32 doorbell;
 
@@ -1649,14 +1647,14 @@ void esas2r_power_down(struct esas2r_adapter *a)
                 * For versions of firmware that support it tell them the driver
                 * is powering down.
                 */
-               if (a->flags2 & AF2_VDA_POWER_DOWN)
+               if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
                        esas2r_power_down_notify_firmware(a);
        }
 
        /* Suspend I/O processing. */
-       esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
-       esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
-       esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+       set_bit(AF_OS_RESET, &a->flags);
+       set_bit(AF_DISC_PENDING, &a->flags);
+       set_bit(AF_CHPRST_PENDING, &a->flags);
 
        esas2r_process_adapter_reset(a);
 
@@ -1673,9 +1671,9 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
 {
        bool ret;
 
-       esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
+       clear_bit(AF_POWER_DOWN, &a->flags);
        esas2r_init_pci_cfg_space(a);
-       esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
+       set_bit(AF_FIRST_INIT, &a->flags);
        atomic_inc(&a->disable_cnt);
 
        /* reinitialize the adapter */
@@ -1687,17 +1685,17 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
        esas2r_send_reset_ae(a, true);
 
        /* clear this flag after initialization. */
-       esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
+       clear_bit(AF_POWER_MGT, &a->flags);
        return ret;
 }
 
 bool esas2r_is_adapter_present(struct esas2r_adapter *a)
 {
-       if (a->flags & AF_NOT_PRESENT)
+       if (test_bit(AF_NOT_PRESENT, &a->flags))
                return false;
 
        if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
-               esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
+               set_bit(AF_NOT_PRESENT, &a->flags);
 
                return false;
        }
index c2d4ff57c5c3f0ab2381ef6b6a773cc1699b7efe..f16d6bcf9bb6db3f3c3e5b381fdb312d315cad55 100644 (file)
@@ -96,7 +96,7 @@ irqreturn_t esas2r_interrupt(int irq, void *dev_id)
        if (!esas2r_adapter_interrupt_pending(a))
                return IRQ_NONE;
 
-       esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
+       set_bit(AF2_INT_PENDING, &a->flags2);
        esas2r_schedule_tasklet(a);
 
        return IRQ_HANDLED;
@@ -317,9 +317,10 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)
         *  = 2 - can start any request
         */
 
-       if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
+       if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+           test_bit(AF_FLASHING, &a->flags))
                startreqs = 0;
-       else if (a->flags & AF_DISC_PENDING)
+       else if (test_bit(AF_DISC_PENDING, &a->flags))
                startreqs = 1;
 
        atomic_inc(&a->disable_cnt);
@@ -367,7 +368,7 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)
                                 * Flashing could have been set by last local
                                 * start
                                 */
-                               if (a->flags & AF_FLASHING)
+                               if (test_bit(AF_FLASHING, &a->flags))
                                        break;
                        }
                }
@@ -404,7 +405,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)
 
                dc->disc_evt = 0;
 
-               esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+               clear_bit(AF_DISC_IN_PROG, &a->flags);
        }
 
        /*
@@ -425,7 +426,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)
                a->last_write =
                        a->last_read = a->list_size - 1;
 
-       esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+       set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
 
        /* Kill all the requests on the active list */
        list_for_each(element, &a->defer_list) {
@@ -470,7 +471,7 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)
        if (atomic_read(&a->disable_cnt) == 0)
                esas2r_do_deferred_processes(a);
 
-       esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
+       clear_bit(AF_OS_RESET, &a->flags);
 
        esas2r_trace_exit();
 }
@@ -478,10 +479,10 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)
 static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
 {
 
-       esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
-       esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
-       esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
-       esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+       clear_bit(AF_CHPRST_NEEDED, &a->flags);
+       clear_bit(AF_BUSRST_NEEDED, &a->flags);
+       clear_bit(AF_BUSRST_DETECTED, &a->flags);
+       clear_bit(AF_BUSRST_PENDING, &a->flags);
        /*
          * Make sure we don't attempt more than 3 resets
         * when the uptime between resets does not exceed one
@@ -507,10 +508,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
                 * prevent the heartbeat from trying to recover.
                 */
 
-               esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
-               esas2r_lock_set_flags(&a->flags, AF_DISABLED);
-               esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
-               esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+               set_bit(AF_DEGRADED_MODE, &a->flags);
+               set_bit(AF_DISABLED, &a->flags);
+               clear_bit(AF_CHPRST_PENDING, &a->flags);
+               clear_bit(AF_DISC_PENDING, &a->flags);
 
                esas2r_disable_chip_interrupts(a);
                a->int_mask = 0;
@@ -519,18 +520,17 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
                esas2r_log(ESAS2R_LOG_CRIT,
                           "Adapter disabled because of hardware failure");
        } else {
-               u32 flags =
-                       esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
+               bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
 
-               if (!(flags & AF_CHPRST_STARTED))
+               if (!alrdyrst)
                        /*
                         * Only disable interrupts if this is
                         * the first reset attempt.
                         */
                        esas2r_disable_chip_interrupts(a);
 
-               if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
-                   !(flags & AF_CHPRST_STARTED)) {
+               if ((test_bit(AF_POWER_MGT, &a->flags)) &&
+                   !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
                        /*
                         * Don't reset the chip on the first
                         * deferred power up attempt.
@@ -543,10 +543,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
                /* Kick off the reinitialization */
                a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
                a->chip_init_time = jiffies_to_msecs(jiffies);
-               if (!(a->flags & AF_POWER_MGT)) {
+               if (!test_bit(AF_POWER_MGT, &a->flags)) {
                        esas2r_process_adapter_reset(a);
 
-                       if (!(flags & AF_CHPRST_STARTED)) {
+                       if (!alrdyrst) {
                                /* Remove devices now that I/O is cleaned up. */
                                a->prev_dev_cnt =
                                        esas2r_targ_db_get_tgt_cnt(a);
@@ -560,38 +560,37 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
 
 static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
 {
-       while (a->flags & AF_CHPRST_DETECTED) {
+       while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
                /*
                 * Balance the enable in esas2r_initadapter_hw.
                 * Esas2r_power_down already took care of it for power
                 * management.
                 */
-               if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
-                                                       AF_POWER_MGT))
+               if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+                   !test_bit(AF_POWER_MGT, &a->flags))
                        esas2r_disable_chip_interrupts(a);
 
                /* Reinitialize the chip. */
                esas2r_check_adapter(a);
                esas2r_init_adapter_hw(a, 0);
 
-               if (a->flags & AF_CHPRST_NEEDED)
+               if (test_bit(AF_CHPRST_NEEDED, &a->flags))
                        break;
 
-               if (a->flags & AF_POWER_MGT) {
+               if (test_bit(AF_POWER_MGT, &a->flags)) {
                        /* Recovery from power management. */
-                       if (a->flags & AF_FIRST_INIT) {
+                       if (test_bit(AF_FIRST_INIT, &a->flags)) {
                                /* Chip reset during normal power up */
                                esas2r_log(ESAS2R_LOG_CRIT,
                                           "The firmware was reset during a normal power-up sequence");
                        } else {
                                /* Deferred power up complete. */
-                               esas2r_lock_clear_flags(&a->flags,
-                                                       AF_POWER_MGT);
+                               clear_bit(AF_POWER_MGT, &a->flags);
                                esas2r_send_reset_ae(a, true);
                        }
                } else {
                        /* Recovery from online chip reset. */
-                       if (a->flags & AF_FIRST_INIT) {
+                       if (test_bit(AF_FIRST_INIT, &a->flags)) {
                                /* Chip reset during driver load */
                        } else {
                                /* Chip reset after driver load */
@@ -602,14 +601,14 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
                                   "Recovering from a chip reset while the chip was online");
                }
 
-               esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
+               clear_bit(AF_CHPRST_STARTED, &a->flags);
                esas2r_enable_chip_interrupts(a);
 
                /*
                 * Clear this flag last!  this indicates that the chip has been
                 * reset already during initialization.
                 */
-               esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
+               clear_bit(AF_CHPRST_DETECTED, &a->flags);
        }
 }
 
@@ -617,26 +616,28 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
 /* Perform deferred tasks when chip interrupts are disabled */
 void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
 {
-       if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
-               if (a->flags & AF_CHPRST_NEEDED)
+
+       if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
+           test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+               if (test_bit(AF_CHPRST_NEEDED, &a->flags))
                        esas2r_chip_rst_needed_during_tasklet(a);
 
                esas2r_handle_chip_rst_during_tasklet(a);
        }
 
-       if (a->flags & AF_BUSRST_NEEDED) {
+       if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
                esas2r_hdebug("hard resetting bus");
 
-               esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+               clear_bit(AF_BUSRST_NEEDED, &a->flags);
 
-               if (a->flags & AF_FLASHING)
-                       esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+               if (test_bit(AF_FLASHING, &a->flags))
+                       set_bit(AF_BUSRST_DETECTED, &a->flags);
                else
                        esas2r_write_register_dword(a, MU_DOORBELL_IN,
                                                    DRBL_RESET_BUS);
        }
 
-       if (a->flags & AF_BUSRST_DETECTED) {
+       if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
                esas2r_process_bus_reset(a);
 
                esas2r_log_dev(ESAS2R_LOG_WARN,
@@ -645,14 +646,14 @@ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
 
                scsi_report_bus_reset(a->host, 0);
 
-               esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
-               esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+               clear_bit(AF_BUSRST_DETECTED, &a->flags);
+               clear_bit(AF_BUSRST_PENDING, &a->flags);
 
                esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
        }
 
-       if (a->flags & AF_PORT_CHANGE) {
-               esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
+       if (test_bit(AF_PORT_CHANGE, &a->flags)) {
+               clear_bit(AF_PORT_CHANGE, &a->flags);
 
                esas2r_targ_db_report_changes(a);
        }
@@ -672,10 +673,10 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
        esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
 
        if (doorbell & DRBL_RESET_BUS)
-               esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+               set_bit(AF_BUSRST_DETECTED, &a->flags);
 
        if (doorbell & DRBL_FORCE_INT)
-               esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+               clear_bit(AF_HEARTBEAT, &a->flags);
 
        if (doorbell & DRBL_PANIC_REASON_MASK) {
                esas2r_hdebug("*** Firmware Panic ***");
@@ -683,7 +684,7 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
        }
 
        if (doorbell & DRBL_FW_RESET) {
-               esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+               set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
                esas2r_local_reset_adapter(a);
        }
 
@@ -918,7 +919,7 @@ void esas2r_complete_request(struct esas2r_adapter *a,
 {
        if (rq->vrq->scsi.function == VDA_FUNC_FLASH
            && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
-               esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
+               clear_bit(AF_FLASHING, &a->flags);
 
        /* See if we setup a callback to do special processing */
 
index 324e2626a08beee6b358f5eb62a08dafe167458f..a8df916cd57a53eccc356413e26ffbbcda847f9c 100644 (file)
@@ -49,7 +49,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
        struct esas2r_request *startrq = rq;
        unsigned long flags;
 
-       if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
+       if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
+                    test_bit(AF_POWER_DOWN, &a->flags))) {
                if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
                        rq->req_stat = RS_SEL2;
                else
@@ -69,8 +70,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
                         * Note that if AF_DISC_PENDING is set than this will
                         * go on the defer queue.
                         */
-                       if (unlikely(t->target_state != TS_PRESENT
-                                    && !(a->flags & AF_DISC_PENDING)))
+                       if (unlikely(t->target_state != TS_PRESENT &&
+                                    !test_bit(AF_DISC_PENDING, &a->flags)))
                                rq->req_stat = RS_SEL;
                }
        }
@@ -91,8 +92,9 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
        spin_lock_irqsave(&a->queue_lock, flags);
 
        if (likely(list_empty(&a->defer_list) &&
-                  !(a->flags &
-                    (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
+                  !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+                  !test_bit(AF_FLASHING, &a->flags) &&
+                  !test_bit(AF_DISC_PENDING, &a->flags)))
                esas2r_local_start_request(a, startrq);
        else
                list_add_tail(&startrq->req_list, &a->defer_list);
@@ -124,7 +126,7 @@ void esas2r_local_start_request(struct esas2r_adapter *a,
 
        if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
                     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
-               esas2r_lock_set_flags(&a->flags, AF_FLASHING);
+               set_bit(AF_FLASHING, &a->flags);
 
        list_add_tail(&rq->req_list, &a->active_list);
        esas2r_start_vda_request(a, rq);
@@ -147,11 +149,10 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,
        if (a->last_write >= a->list_size) {
                a->last_write = 0;
                /* update the toggle bit */
-               if (a->flags & AF_COMM_LIST_TOGGLE)
-                       esas2r_lock_clear_flags(&a->flags,
-                                               AF_COMM_LIST_TOGGLE);
+               if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
+                       clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
                else
-                       esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+                       set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
        }
 
        element =
@@ -169,7 +170,7 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,
        /* Update the write pointer */
        dw = a->last_write;
 
-       if (a->flags & AF_COMM_LIST_TOGGLE)
+       if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
                dw |= MU_ILW_TOGGLE;
 
        esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
@@ -687,18 +688,14 @@ static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
                        esas2r_write_register_dword(a, MU_DOORBELL_OUT,
                                                    doorbell);
                        if (ver == DRBL_FW_VER_0) {
-                               esas2r_lock_set_flags(&a->flags,
-                                                     AF_CHPRST_DETECTED);
-                               esas2r_lock_set_flags(&a->flags,
-                                                     AF_LEGACY_SGE_MODE);
+                               set_bit(AF_CHPRST_DETECTED, &a->flags);
+                               set_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
                                a->max_vdareq_size = 128;
                                a->build_sgl = esas2r_build_sg_list_sge;
                        } else if (ver == DRBL_FW_VER_1) {
-                               esas2r_lock_set_flags(&a->flags,
-                                                     AF_CHPRST_DETECTED);
-                               esas2r_lock_clear_flags(&a->flags,
-                                                       AF_LEGACY_SGE_MODE);
+                               set_bit(AF_CHPRST_DETECTED, &a->flags);
+                               clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
 
                                a->max_vdareq_size = 1024;
                                a->build_sgl = esas2r_build_sg_list_prd;
@@ -719,28 +716,27 @@ void esas2r_timer_tick(struct esas2r_adapter *a)
        a->last_tick_time = currtime;
 
        /* count down the uptime */
-       if (a->chip_uptime
-           && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+       if (a->chip_uptime &&
+           !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+           !test_bit(AF_DISC_PENDING, &a->flags)) {
                if (deltatime >= a->chip_uptime)
                        a->chip_uptime = 0;
                else
                        a->chip_uptime -= deltatime;
        }
 
-       if (a->flags & AF_CHPRST_PENDING) {
-               if (!(a->flags & AF_CHPRST_NEEDED)
-                   && !(a->flags & AF_CHPRST_DETECTED))
+       if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
+               if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
+                   !test_bit(AF_CHPRST_DETECTED, &a->flags))
                        esas2r_handle_pending_reset(a, currtime);
        } else {
-               if (a->flags & AF_DISC_PENDING)
+               if (test_bit(AF_DISC_PENDING, &a->flags))
                        esas2r_disc_check_complete(a);
-
-               if (a->flags & AF_HEARTBEAT_ENB) {
-                       if (a->flags & AF_HEARTBEAT) {
+               if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
+                       if (test_bit(AF_HEARTBEAT, &a->flags)) {
                                if ((currtime - a->heartbeat_time) >=
                                    ESAS2R_HEARTBEAT_TIME) {
-                                       esas2r_lock_clear_flags(&a->flags,
-                                                               AF_HEARTBEAT);
+                                       clear_bit(AF_HEARTBEAT, &a->flags);
                                        esas2r_hdebug("heartbeat failed");
                                        esas2r_log(ESAS2R_LOG_CRIT,
                                                   "heartbeat failed");
@@ -748,7 +744,7 @@ void esas2r_timer_tick(struct esas2r_adapter *a)
                                        esas2r_local_reset_adapter(a);
                                }
                        } else {
-                               esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
+                               set_bit(AF_HEARTBEAT, &a->flags);
                                a->heartbeat_time = currtime;
                                esas2r_force_interrupt(a);
                        }
@@ -812,7 +808,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
        rqaux->vrq->scsi.flags |=
                cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
 
-       if (a->flags & AF_FLASHING) {
+       if (test_bit(AF_FLASHING, &a->flags)) {
                /* Assume success.  if there are active requests, return busy */
                rqaux->req_stat = RS_SUCCESS;
 
@@ -831,7 +827,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
 
        spin_unlock_irqrestore(&a->queue_lock, flags);
 
-       if (!(a->flags & AF_FLASHING))
+       if (!test_bit(AF_FLASHING, &a->flags))
                esas2r_start_request(a, rqaux);
 
        esas2r_comp_list_drain(a, &comp_list);
@@ -848,11 +844,12 @@ void esas2r_reset_bus(struct esas2r_adapter *a)
 {
        esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
 
-       if (!(a->flags & AF_DEGRADED_MODE)
-           && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
-               esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
-               esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
-               esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+       if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+           !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+           !test_bit(AF_DISC_PENDING, &a->flags)) {
+               set_bit(AF_BUSRST_NEEDED, &a->flags);
+               set_bit(AF_BUSRST_PENDING, &a->flags);
+               set_bit(AF_OS_RESET, &a->flags);
 
                esas2r_schedule_tasklet(a);
        }
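
Annotation: the esas2r hunks in this part of the series all apply one mechanical conversion: the driver-private esas2r_lock_set_flags()/esas2r_lock_clear_flags() helpers and open-coded "a->flags & AF_*" mask tests are replaced with the kernel's atomic bitops (set_bit/clear_bit/test_bit), which also implies the AF_*/AF2_* values become bit numbers rather than masks. A minimal sketch of the pattern follows; the adapter structure, helper names and flag indices here are hypothetical, not the real struct esas2r_adapter.

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical flag indices and adapter; illustration only. */
enum { AF_FLASHING, AF_CHPRST_PENDING, AF_DISC_PENDING };

struct adapter {
	unsigned long flags;		/* bitmap, updated atomically */
};

static bool adapter_can_start_io(struct adapter *a)
{
	/* One test_bit() per flag replaces the old combined mask test;
	 * each bit operation is atomic on its own, so no wrapper lock
	 * is needed around flag updates. */
	return !test_bit(AF_CHPRST_PENDING, &a->flags) &&
	       !test_bit(AF_FLASHING, &a->flags) &&
	       !test_bit(AF_DISC_PENDING, &a->flags);
}

static void adapter_begin_flash(struct adapter *a)
{
	set_bit(AF_FLASHING, &a->flags);	/* atomic read-modify-write */
}

static void adapter_end_flash(struct adapter *a)
{
	clear_bit(AF_FLASHING, &a->flags);
}
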
index e5b09027e066406daceab374ff299ec2d83f7d38..d89a0277a8e189f02af169c0d3476267dc8dc135 100644 (file)
@@ -347,7 +347,7 @@ static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
 {
        struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return false;
 
        esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
@@ -463,7 +463,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
                gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
                gcc->bios_build_rev = LOWORD(a->flash_ver);
 
-               if (a->flags2 & AF2_THUNDERLINK)
+               if (test_bit(AF2_THUNDERLINK, &a->flags2))
                        gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
                                           | CSMI_CNTLRF_SATA_HBA;
                else
@@ -485,7 +485,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
        {
                struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
 
-               if (a->flags & AF_DEGRADED_MODE)
+               if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        gcs->status = CSMI_CNTLR_STS_FAILED;
                else
                        gcs->status = CSMI_CNTLR_STS_GOOD;
@@ -819,10 +819,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
 
                gai->adap_type = ATTO_GAI_AT_ESASRAID2;
 
-               if (a->flags2 & AF2_THUNDERLINK)
+               if (test_bit(AF2_THUNDERLINK, &a->flags2))
                        gai->adap_type = ATTO_GAI_AT_TLSASHBA;
 
-               if (a->flags & AF_DEGRADED_MODE)
+               if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
 
                gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
@@ -938,7 +938,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
                                u32 total_len = ESAS2R_FWCOREDUMP_SZ;
 
                                /* Size is zero if a core dump isn't present */
-                               if (!(a->flags2 & AF2_COREDUMP_SAVED))
+                               if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
                                        total_len = 0;
 
                                if (len > total_len)
@@ -960,8 +960,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
                                memset(a->fw_coredump_buff, 0,
                                       ESAS2R_FWCOREDUMP_SZ);
 
-                               esas2r_lock_clear_flags(&a->flags2,
-                                                       AF2_COREDUMP_SAVED);
+                               clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
                        } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
                                hi->status = ATTO_STS_UNSUPPORTED;
                                break;
@@ -973,7 +972,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
                        trc->total_length = ESAS2R_FWCOREDUMP_SZ;
 
                        /* Return zero length buffer if core dump not present */
-                       if (!(a->flags2 & AF2_COREDUMP_SAVED))
+                       if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
                                trc->total_length = 0;
                } else {
                        hi->status = ATTO_STS_UNSUPPORTED;
@@ -1048,6 +1047,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
                else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
                        rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
 
+
                if (!esas2r_build_sg_list(a, rq, sgc)) {
                        hi->status = ATTO_STS_OUT_OF_RSRC;
                        break;
@@ -1139,15 +1139,15 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
                        break;
                }
 
-               if (a->flags & AF_CHPRST_NEEDED)
+               if (test_bit(AF_CHPRST_NEEDED, &a->flags))
                        ac->adap_state = ATTO_AC_AS_RST_SCHED;
-               else if (a->flags & AF_CHPRST_PENDING)
+               else if (test_bit(AF_CHPRST_PENDING, &a->flags))
                        ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
-               else if (a->flags & AF_DISC_PENDING)
+               else if (test_bit(AF_DISC_PENDING, &a->flags))
                        ac->adap_state = ATTO_AC_AS_RST_DISC;
-               else if (a->flags & AF_DISABLED)
+               else if (test_bit(AF_DISABLED, &a->flags))
                        ac->adap_state = ATTO_AC_AS_DISABLED;
-               else if (a->flags & AF_DEGRADED_MODE)
+               else if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        ac->adap_state = ATTO_AC_AS_DEGRADED;
                else
                        ac->adap_state = ATTO_AC_AS_OK;
index 4abf1272e1ebd191a8cf033267b0a8d4fcf0913c..f37f3e3dd5d5af98a3744f74b93761183d847f29 100644 (file)
@@ -889,7 +889,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        /* Assume success, if it fails we will fix the result later. */
        cmd->result = DID_OK << 16;
 
-       if (unlikely(a->flags & AF_DEGRADED_MODE)) {
+       if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
                cmd->result = DID_NO_CONNECT << 16;
                cmd->scsi_done(cmd);
                return 0;
@@ -1050,7 +1050,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
 
        esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
 
-       if (a->flags & AF_DEGRADED_MODE) {
+       if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
                cmd->result = DID_ABORT << 16;
 
                scsi_set_resid(cmd, 0);
@@ -1131,7 +1131,7 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
        struct esas2r_adapter *a =
                (struct esas2r_adapter *)cmd->device->host->hostdata;
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return FAILED;
 
        if (host_reset)
@@ -1141,14 +1141,14 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
 
        /* above call sets the AF_OS_RESET flag.  wait for it to clear. */
 
-       while (a->flags & AF_OS_RESET) {
+       while (test_bit(AF_OS_RESET, &a->flags)) {
                msleep(10);
 
-               if (a->flags & AF_DEGRADED_MODE)
+               if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        return FAILED;
        }
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return FAILED;
 
        return SUCCESS;
@@ -1176,7 +1176,7 @@ static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
        u8 task_management_status = RS_PENDING;
        bool completed;
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return FAILED;
 
 retry:
@@ -1229,7 +1229,7 @@ retry:
                        msleep(10);
        }
 
-       if (a->flags & AF_DEGRADED_MODE)
+       if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return FAILED;
 
        if (task_management_status == RS_BUSY) {
@@ -1666,13 +1666,13 @@ void esas2r_adapter_tasklet(unsigned long context)
 {
        struct esas2r_adapter *a = (struct esas2r_adapter *)context;
 
-       if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
-               esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
+       if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
+               clear_bit(AF2_TIMER_TICK, &a->flags2);
                esas2r_timer_tick(a);
        }
 
-       if (likely(a->flags2 & AF2_INT_PENDING)) {
-               esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
+       if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
+               clear_bit(AF2_INT_PENDING, &a->flags2);
                esas2r_adapter_interrupt(a);
        }
 
@@ -1680,12 +1680,12 @@ void esas2r_adapter_tasklet(unsigned long context)
                esas2r_do_tasklet_tasks(a);
 
        if (esas2r_is_tasklet_pending(a)
-           || (a->flags2 & AF2_INT_PENDING)
-           || (a->flags2 & AF2_TIMER_TICK)) {
-               esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+           || (test_bit(AF2_INT_PENDING, &a->flags2))
+           || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
+               clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
                esas2r_schedule_tasklet(a);
        } else {
-               esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+               clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
        }
 }
 
@@ -1707,7 +1707,7 @@ static void esas2r_timer_callback(unsigned long context)
 {
        struct esas2r_adapter *a = (struct esas2r_adapter *)context;
 
-       esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
+       set_bit(AF2_TIMER_TICK, &a->flags2);
 
        esas2r_schedule_tasklet(a);
 
index e540a2fa3d1509e59ba6d1594a343a4536f29b37..bf45beaad439a9c38cad60d9162b7d1be4701e1b 100644 (file)
@@ -86,7 +86,7 @@ void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
 
        esas2r_trace_enter();
 
-       if (a->flags & AF_DISC_PENDING) {
+       if (test_bit(AF_DISC_PENDING, &a->flags)) {
                esas2r_trace_exit();
                return;
        }
index fd1392879647c63f6d2709db2d2033981ea87b5d..30028e56df63790e7f8ae1a9b6caec3a729fc966 100644 (file)
@@ -84,7 +84,7 @@ bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
                return false;
        }
 
-       if (a->flags & AF_DEGRADED_MODE) {
+       if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
                vi->status = ATTO_STS_DEGRADED;
                return false;
        }
@@ -310,9 +310,9 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
                                le32_to_cpu(rsp->vda_version);
                        cfg->data.init.fw_build = rsp->fw_build;
 
-                       snprintf(buf, sizeof(buf), "%1d.%02d",
-                               (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
-                               (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+                       snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
+                                (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+                                (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
 
                        memcpy(&cfg->data.init.fw_release, buf,
                               sizeof(cfg->data.init.fw_release));
@@ -389,7 +389,7 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a,
        vrq->length = cpu_to_le32(length);
 
        if (vrq->length) {
-               if (a->flags & AF_LEGACY_SGE_MODE) {
+               if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
                        vrq->sg_list_offset = (u8)offsetof(
                                struct atto_vda_mgmt_req, sge);
 
@@ -427,7 +427,7 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
 
        vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
 
-       if (a->flags & AF_LEGACY_SGE_MODE) {
+       if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
                vrq->sg_list_offset =
                        (u8)offsetof(struct atto_vda_ae_req, sge);
                vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
index e4dd3d7cd236520a35487e5acabb77559ec78f30..528d43b7b569eceee49b65a6a19d06150c55283c 100644 (file)
@@ -27,6 +27,7 @@
 #include "fnic_io.h"
 #include "fnic_res.h"
 #include "fnic_trace.h"
+#include "fnic_stats.h"
 #include "vnic_dev.h"
 #include "vnic_wq.h"
 #include "vnic_rq.h"
@@ -38,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.23"
+#define DRV_VERSION            "1.5.0.45"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -232,6 +233,13 @@ struct fnic {
        unsigned int wq_count;
        unsigned int cq_count;
 
+       struct dentry *fnic_stats_debugfs_host;
+       struct dentry *fnic_stats_debugfs_file;
+       struct dentry *fnic_reset_debugfs_file;
+       unsigned int reset_stats;
+       atomic64_t io_cmpl_skip;
+       struct fnic_stats fnic_stats;
+
        u32 vlan_hw_insert:1;           /* let hw insert the tag */
        u32 in_remove:1;                /* fnic device in removal */
        u32 stop_rx_link_events:1;      /* stop proc. rx frames, link events */
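
Annotation: the new fnic_stats member above comes from fnic_stats.h, which is added by this series but whose contents are not shown in this diff. A partial, inferred sketch of its layout follows, using only struct tags and counters that the hunks below actually reference; the ordering and any omitted members are guesses, not the real header.

#include <linux/types.h>
#include <linux/atomic.h>

/* Inferred sketch only -- not the actual contents of fnic_stats.h. */
struct io_path_stats {
	atomic64_t active_ios;	/* plausibly the first member, since
				 * fnic_reset_stats_write() preserves the
				 * first u64 of this struct across a reset */
	atomic64_t max_active_ios;
	atomic64_t num_ios;
	atomic64_t io_completions;
	atomic64_t io_failures;
	atomic64_t alloc_failures;
	/* ... further atomic64_t counters referenced in fnic_scsi.c ... */
};

struct fw_stats        { atomic64_t active_fw_reqs; atomic64_t max_fw_reqs;
			 atomic64_t fw_out_of_resources; atomic64_t io_fw_errs; /* ... */ };
struct abort_stats     { atomic64_t abort_fw_timeouts; atomic64_t abort_io_not_found;
			 atomic64_t abort_failures; /* ... */ };
struct terminate_stats { atomic64_t terminate_fw_timeouts; atomic64_t terminate_io_not_found;
			 atomic64_t terminate_failures; /* ... */ };
struct reset_stats     { atomic64_t fw_resets; atomic64_t fw_reset_completions;
			 atomic64_t fw_reset_failures; /* ... */ };
struct vlan_stats      { atomic64_t vlan_disc_reqs; atomic64_t resp_withno_vlanID;
			 atomic64_t flogi_rejects; atomic64_t sol_expiry_count; /* ... */ };
struct misc_stats      { u64 last_isr_time; u64 last_ack_time; atomic64_t isr_count;
			 atomic64_t frame_errors; /* ... */ };

struct fnic_stats {
	struct io_path_stats io_stats;
	struct abort_stats abts_stats;
	struct terminate_stats term_stats;
	struct reset_stats reset_stats;
	struct fw_stats fw_stats;
	struct vlan_stats vlan_stats;
	struct misc_stats misc_stats;
};
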
index cbcb0121c84de5557852e641bcff6248881480e2..b6073f875761bf49b4828decd33bc1a8aafe8ead 100644 (file)
 static struct dentry *fnic_trace_debugfs_root;
 static struct dentry *fnic_trace_debugfs_file;
 static struct dentry *fnic_trace_enable;
+static struct dentry *fnic_stats_debugfs_root;
+
+/*
+ * fnic_debugfs_init - Initialize debugfs for fnic debug logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory and statistics directory for trace buffer and
+ * stats logging.
+ */
+int fnic_debugfs_init(void)
+{
+       int rc = -1;
+       fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+       if (!fnic_trace_debugfs_root) {
+               printk(KERN_DEBUG "Cannot create debugfs root\n");
+               return rc;
+       }
+
+       if (!fnic_trace_debugfs_root) {
+               printk(KERN_DEBUG
+                       "fnic root directory doesn't exist in debugfs\n");
+               return rc;
+       }
+
+       fnic_stats_debugfs_root = debugfs_create_dir("statistics",
+                                               fnic_trace_debugfs_root);
+       if (!fnic_stats_debugfs_root) {
+               printk(KERN_DEBUG "Cannot create Statistics directory\n");
+               return rc;
+       }
+
+       rc = 0;
+       return rc;
+}
+
+/*
+ * fnic_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic.
+ */
+void fnic_debugfs_terminate(void)
+{
+       debugfs_remove(fnic_stats_debugfs_root);
+       fnic_stats_debugfs_root = NULL;
+
+       debugfs_remove(fnic_trace_debugfs_root);
+       fnic_trace_debugfs_root = NULL;
+}
 
 /*
  * fnic_trace_ctrl_open - Open the trace_enable file
@@ -241,16 +293,16 @@ static const struct file_operations fnic_trace_debugfs_fops = {
  * Description:
  * When Debugfs is configured this routine sets up the fnic debugfs
  * file system. If not already created, this routine will create the
- * fnic directory. It will create file trace to log fnic trace buffer
- * output into debugfs and it will also create file trace_enable to
- * control enable/disable of trace logging into trace buffer.
+ * file trace to log fnic trace buffer output into debugfs. It will
+ * also create the file trace_enable to control enable/disable of
+ * trace logging into the trace buffer.
  */
 int fnic_trace_debugfs_init(void)
 {
        int rc = -1;
-       fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
        if (!fnic_trace_debugfs_root) {
-               printk(KERN_DEBUG "Cannot create debugfs root\n");
+               printk(KERN_DEBUG
+                       "FNIC Debugfs root directory doesn't exist\n");
                return rc;
        }
        fnic_trace_enable = debugfs_create_file("tracing_enable",
@@ -259,8 +311,8 @@ int fnic_trace_debugfs_init(void)
                                          NULL, &fnic_trace_ctrl_fops);
 
        if (!fnic_trace_enable) {
-               printk(KERN_DEBUG "Cannot create trace_enable file"
-                                 " under debugfs");
+               printk(KERN_DEBUG
+                       "Cannot create trace_enable file under debugfs\n");
                return rc;
        }
 
@@ -271,7 +323,8 @@ int fnic_trace_debugfs_init(void)
                                                  &fnic_trace_debugfs_fops);
 
        if (!fnic_trace_debugfs_file) {
-               printk(KERN_DEBUG "Cannot create trace file under debugfs");
+               printk(KERN_DEBUG
+                       "Cannot create trace file under debugfs\n");
                return rc;
        }
        rc = 0;
@@ -295,8 +348,323 @@ void fnic_trace_debugfs_terminate(void)
                debugfs_remove(fnic_trace_enable);
                fnic_trace_enable = NULL;
        }
-       if (fnic_trace_debugfs_root) {
-               debugfs_remove(fnic_trace_debugfs_root);
-               fnic_trace_debugfs_root = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens a debugfs file reset_stats and stores the i_private
+ * data in the debug structure so it can be retrieved later while
+ * performing other file operations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+       struct stats_debug_info *debug;
+
+       debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+       if (!debug)
+               return -ENOMEM;
+
+       debug->i_private = inode->i_private;
+
+       file->private_data = debug;
+
+       return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the reset_stats variable into a local
+ * buffer. It will start reading the file at @ppos and copy up to @cnt
+ * bytes of data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+                                       char __user *ubuf,
+                                       size_t cnt, loff_t *ppos)
+{
+       struct stats_debug_info *debug = file->private_data;
+       struct fnic *fnic = (struct fnic *)debug->i_private;
+       char buf[64];
+       int len;
+
+       len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies the value written by the user from @ubuf and, if it
+ * is non-zero, resets the cumulative stats of the fnic.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+                                       const char __user *ubuf,
+                                       size_t cnt, loff_t *ppos)
+{
+       struct stats_debug_info *debug = file->private_data;
+       struct fnic *fnic = (struct fnic *)debug->i_private;
+       struct fnic_stats *stats = &fnic->fnic_stats;
+       u64 *io_stats_p = (u64 *)&stats->io_stats;
+       u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+       char buf[64];
+       unsigned long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       fnic->reset_stats = val;
+
+       if (fnic->reset_stats) {
+               /* Skip variable is used to avoid descrepancies to Num IOs
+               /* The skip variable is used to avoid discrepancies between
+                * the Num IOs and IO Completions stats. Skip incrementing
+                * IO Compls for IOs still active when the stats were reset.
+               atomic64_set(&fnic->io_cmpl_skip,
+                       atomic64_read(&stats->io_stats.active_ios));
+               memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+               memset(&stats->term_stats, 0,
+                       sizeof(struct terminate_stats));
+               memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+               memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+               memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+               memset(io_stats_p+1, 0,
+                       sizeof(struct io_path_stats) - sizeof(u64));
+               memset(fw_stats_p+1, 0,
+                       sizeof(struct fw_stats) - sizeof(u64));
        }
+
+       (*ppos)++;
+       return cnt;
+}
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+                                       struct file *file)
+{
+       struct stats_debug_info *debug = file->private_data;
+       kfree(debug);
+       return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for a specific host
+ * and get fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens the debugfs stats file of a specific host and prints
+ * the fnic stats.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+                                       struct file *file)
+{
+       struct fnic *fnic = inode->i_private;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+       struct stats_debug_info *debug;
+       int buf_size = 2 * PAGE_SIZE;
+
+       debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+       if (!debug)
+               return -ENOMEM;
+
+       debug->debug_buffer = vmalloc(buf_size);
+       if (!debug->debug_buffer) {
+               kfree(debug);
+               return -ENOMEM;
+       }
+
+       debug->buf_size = buf_size;
+       memset((void *)debug->debug_buffer, 0, buf_size);
+       debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+       file->private_data = debug;
+
+       return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+                                       char __user *ubuf,
+                                       size_t nbytes,
+                                       loff_t *pos)
+{
+       struct stats_debug_info *debug = file->private_data;
+       int rc = 0;
+       rc = simple_read_from_buffer(ubuf, nbytes, pos,
+                                       debug->debug_buffer,
+                                       debug->buffer_len);
+       return rc;
+}
+
+/*
+ * fnic_stats_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+                                       struct file *file)
+{
+       struct stats_debug_info *debug = file->private_data;
+       vfree(debug->debug_buffer);
+       kfree(debug);
+       return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = fnic_stats_debugfs_open,
+       .read = fnic_stats_debugfs_read,
+       .release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = fnic_reset_stats_open,
+       .read = fnic_reset_stats_read,
+       .write = fnic_reset_stats_write,
+       .release = fnic_reset_stats_release,
+};
+
+/*
+ * fnic_stats_debugfs_init - Initialize stats struct and create stats file per fnic
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the stats files per fnic.
+ * It will create the files stats and reset_stats under the statistics/host#
+ * directory to log per-fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+       int rc = -1;
+       char name[16];
+
+       snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+       if (!fnic_stats_debugfs_root) {
+               printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+               return rc;
+       }
+       fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+                                               fnic_stats_debugfs_root);
+       if (!fnic->fnic_stats_debugfs_host) {
+               printk(KERN_DEBUG "Cannot create host directory\n");
+               return rc;
+       }
+
+       fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+                                               S_IFREG|S_IRUGO|S_IWUSR,
+                                               fnic->fnic_stats_debugfs_host,
+                                               fnic,
+                                               &fnic_stats_debugfs_fops);
+       if (!fnic->fnic_stats_debugfs_file) {
+               printk(KERN_DEBUG "Cannot create host stats file\n");
+               return rc;
+       }
+
+       fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+                                               S_IFREG|S_IRUGO|S_IWUSR,
+                                               fnic->fnic_stats_debugfs_host,
+                                               fnic,
+                                               &fnic_reset_debugfs_fops);
+       if (!fnic->fnic_reset_debugfs_file) {
+               printk(KERN_DEBUG "Cannot create host reset_stats file\n");
+               return rc;
+       }
+       rc = 0;
+       return rc;
+}
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */
+void fnic_stats_debugfs_remove(struct fnic *fnic)
+{
+       if (!fnic)
+               return;
+
+       debugfs_remove(fnic->fnic_stats_debugfs_file);
+       fnic->fnic_stats_debugfs_file = NULL;
+
+       debugfs_remove(fnic->fnic_reset_debugfs_file);
+       fnic->fnic_reset_debugfs_file = NULL;
+
+       debugfs_remove(fnic->fnic_stats_debugfs_host);
+       fnic->fnic_stats_debugfs_host = NULL;
 }
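
Annotation: taken together, the debugfs pieces added above produce the layout sketched in the comment below once a host is probed. This is an inferred overview, assuming debugfs is mounted at the usual /sys/kernel/debug and using "host0" to stand for whichever host numbers get assigned.

/*
 * /sys/kernel/debug/fnic/
 *   tracing_enable          - enable/disable trace logging (pre-existing file)
 *   trace                   - dump of the fnic trace buffer (pre-existing file)
 *   statistics/
 *     host0/
 *       stats               - snapshot built by fnic_get_stats_data() on open
 *       reset_stats         - write a non-zero value to clear the counters
 */
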
index 006fa92a02df0b4178f2ce88c73dffd8635cb5fc..60a1c50aa68fd5f49cae463e30ee4389897d6b9f 100644 (file)
@@ -302,6 +302,7 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
 static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
 {
        struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct sk_buff *skb;
        char *eth_fr;
        int fr_len;
@@ -337,6 +338,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
        vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
        vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
        put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+       atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
 
        skb_put(skb, sizeof(*vlan));
        skb->protocol = htons(ETH_P_FIP);
@@ -354,6 +356,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
        struct fcoe_ctlr *fip = &fnic->ctlr;
        struct fip_header *fiph;
        struct fip_desc *desc;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u16 vid;
        size_t rlen;
        size_t dlen;
@@ -402,6 +405,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
        /* any VLAN descriptors present ? */
        if (list_empty(&fnic->vlans)) {
                /* retry from timer */
+               atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
                FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
                          "No VLAN descriptors in FIP VLAN response\n");
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
@@ -533,6 +537,7 @@ drop:
 void fnic_handle_fip_frame(struct work_struct *work)
 {
        struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        struct sk_buff *skb;
        struct ethhdr *eh;
@@ -567,6 +572,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
                         * fcf's & restart from scratch
                         */
                        if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+                               atomic64_inc(
+                                       &fnic_stats->vlan_stats.flogi_rejects);
                                shost_printk(KERN_INFO, fnic->lport->host,
                                          "Trigger a Link down - VLAN Disc\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
@@ -753,6 +760,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
        struct sk_buff *skb;
        struct fc_frame *fp;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned int eth_hdrs_stripped;
        u8 type, color, eop, sop, ingress_port, vlan_stripped;
        u8 fcoe = 0, fcoe_sof, fcoe_eof;
@@ -803,6 +811,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
                eth_hdrs_stripped = 0;
                skb_trim(skb, bytes_written);
                if (!fcs_ok) {
+                       atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                        FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                     "fcs error.  dropping packet.\n");
                        goto drop;
@@ -818,6 +827,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
        }
 
        if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
+               atomic64_inc(&fnic_stats->misc_stats.frame_errors);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                             "fnic rq_cmpl fcoe x%x fcsok x%x"
                             " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
@@ -1205,6 +1215,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
 {
        unsigned long flags;
        struct fcoe_vlan *vlan;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        u64 sol_time;
 
        spin_lock_irqsave(&fnic->fnic_lock, flags);
@@ -1273,6 +1284,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
                        vlan->state = FIP_VLAN_SENT; /* sent now */
                }
                spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
                vlan->sol_count++;
                sol_time = jiffies + msecs_to_jiffies
                                        (FCOE_CTLR_START_DELAY);
index 5c1f223cabce314408e6549e9340a5a25c2b0505..7d9b54ae7f62f826f503fb8a72870cb1a093321f 100644 (file)
@@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
        if (!pba)
                return IRQ_NONE;
 
+       fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+       atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
        if (pba & (1 << FNIC_INTX_NOTIFY)) {
                vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
                fnic_handle_link_event(fnic);
@@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
        struct fnic *fnic = data;
        unsigned long work_done = 0;
 
+       fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+       atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
        work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
        work_done += fnic_wq_cmpl_handler(fnic, -1);
        work_done += fnic_rq_cmpl_handler(fnic, -1);
@@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
        struct fnic *fnic = data;
        unsigned long rq_work_done = 0;
 
+       fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+       atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
        rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
        vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
                                 rq_work_done,
@@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
        struct fnic *fnic = data;
        unsigned long wq_work_done = 0;
 
+       fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+       atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
        wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
        vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
                                 wq_work_done,
@@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
        struct fnic *fnic = data;
        unsigned long wq_copy_work_done = 0;
 
+       fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+       atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
        wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
        vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
                                 wq_copy_work_done,
@@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
 {
        struct fnic *fnic = data;
 
+       fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
+       atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
+
        vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
        fnic_log_q_error(fnic);
        fnic_handle_link_event(fnic);
index 889b594fe8b0d0cd2f7409613e8271e4c7cbdccd..33e4ec2bfe734eefda1cd049bfde7f1112cf0ea2 100644 (file)
@@ -556,6 +556,13 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        host->transportt = fnic_fc_transport;
 
+       err = fnic_stats_debugfs_init(fnic);
+       if (err) {
+               shost_printk(KERN_ERR, fnic->lport->host,
+                               "Failed to initialize debugfs for stats\n");
+               fnic_stats_debugfs_remove(fnic);
+       }
+
        /* Setup PCI resources */
        pci_set_drvdata(pdev, fnic);
 
@@ -917,6 +924,7 @@ err_out_release_regions:
 err_out_disable_device:
        pci_disable_device(pdev);
 err_out_free_hba:
+       fnic_stats_debugfs_remove(fnic);
        scsi_host_put(lp->host);
 err_out:
        return err;
@@ -969,6 +977,7 @@ static void fnic_remove(struct pci_dev *pdev)
 
        fcoe_ctlr_destroy(&fnic->ctlr);
        fc_lport_destroy(lp);
+       fnic_stats_debugfs_remove(fnic);
 
        /*
         * This stops the fnic device, masks all interrupts. Completed
@@ -1013,6 +1022,14 @@ static int __init fnic_init_module(void)
 
        printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
 
+       /* Create debugfs entries for fnic */
+       err = fnic_debugfs_init();
+       if (err < 0) {
+               printk(KERN_ERR PFX "Failed to create fnic directory "
+                               "for tracing and stats logging\n");
+               fnic_debugfs_terminate();
+       }
+
        /* Allocate memory for trace buffer */
        err = fnic_trace_buf_init();
        if (err < 0) {
@@ -1101,6 +1118,7 @@ err_create_fnic_sgl_slab_max:
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 err_create_fnic_sgl_slab_dflt:
        fnic_trace_free();
+       fnic_debugfs_terminate();
        return err;
 }
 
@@ -1117,6 +1135,7 @@ static void __exit fnic_cleanup_module(void)
        kmem_cache_destroy(fnic_io_req_cache);
        fc_release_transport(fnic_fc_transport);
        fnic_trace_free();
+       fnic_debugfs_terminate();
 }
 
 module_init(fnic_init_module);
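
Annotation: the fnic_scsi.c hunks below repeatedly apply the same bookkeeping around firmware requests and IOs: bump an atomic64_t counter and, when the new value exceeds the recorded maximum, update that maximum as well. A condensed sketch of the pattern as a hypothetical helper follows; the patch itself open-codes this at every call site.

#include <linux/atomic.h>

/* Hypothetical helper, for illustration only: count an in-flight object
 * and track the high-water mark alongside it. */
static inline void stats_inc_and_track_max(atomic64_t *active, atomic64_t *max)
{
	atomic64_inc(active);
	if (atomic64_read(active) > atomic64_read(max))
		atomic64_set(max, atomic64_read(active));
}

Note that the read/compare/set sequence is not atomic as a whole, so the high-water mark is best effort under concurrency; for statistics counters that is acceptable, which is presumably why no extra locking is added around it.
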
index d014aae19134624824190305040df9ee37fcbf1d..0521436d05d64b64e108416f2702141d2e564240 100644 (file)
@@ -226,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)
 
        if (!vnic_wq_copy_desc_avail(wq))
                ret = -EAGAIN;
-       else
+       else {
                fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
+               atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+               if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+                         atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+                       atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+                               atomic64_read(
+                                 &fnic->fnic_stats.fw_stats.active_fw_reqs));
+       }
 
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
 
-       if (!ret)
+       if (!ret) {
+               atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Issued fw reset\n");
-       else {
+       } else {
                fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Failed to issue fw reset\n");
@@ -291,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
                              fc_id, fnic->ctlr.map_dest, gw_mac);
        }
 
+       atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+       if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+                 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+               atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+                 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 flogi_reg_ioreq_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        return ret;
@@ -310,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
        struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct host_sg_desc *desc;
+       struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        u8 pri_tag = 0;
        unsigned int i;
        unsigned long intr_flags;
@@ -358,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
                FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
                          "fnic_queue_wq_copy_desc failure - no descriptors\n");
+               atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
@@ -386,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                                         rport->maxframe_size, rp->r_a_tov,
                                         rp->e_d_tov);
 
+       atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+       if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+                 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+               atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+                 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
        return 0;
 }
@@ -401,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        struct fc_rport *rport;
        struct fnic_io_req *io_req = NULL;
        struct fnic *fnic = lport_priv(lp);
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        struct vnic_wq_copy *wq;
        int ret;
        u64 cmd_trace;
@@ -414,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        rport = starget_to_rport(scsi_target(sc->device));
        ret = fc_remote_port_chkready(rport);
        if (ret) {
+               atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                sc->result = ret;
                done(sc);
                return 0;
@@ -436,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        /* Get a new io_req for this SCSI IO */
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
+               atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                ret = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        }
@@ -462,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                        mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
                                      GFP_ATOMIC);
                if (!io_req->sgl_list) {
+                       atomic64_inc(&fnic_stats->io_stats.alloc_failures);
                        ret = SCSI_MLQUEUE_HOST_BUSY;
                        scsi_dma_unmap(sc);
                        mempool_free(io_req, fnic->io_req_pool);
@@ -509,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
                        mempool_free(io_req, fnic->io_req_pool);
                }
        } else {
+               atomic64_inc(&fnic_stats->io_stats.active_ios);
+               atomic64_inc(&fnic_stats->io_stats.num_ios);
+               if (atomic64_read(&fnic_stats->io_stats.active_ios) >
+                         atomic64_read(&fnic_stats->io_stats.max_active_ios))
+                       atomic64_set(&fnic_stats->io_stats.max_active_ios,
+                            atomic64_read(&fnic_stats->io_stats.active_ios));
+
                /* REVISIT: Use per IO lock in the final code */
                CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
        }
@@ -542,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
        struct fcpio_tag tag;
        int ret = 0;
        unsigned long flags;
+       struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
 
        fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
 
+       atomic64_inc(&reset_stats->fw_reset_completions);
+
        /* Clean up all outstanding io requests */
        fnic_cleanup_io(fnic, SCSI_NO_TAG);
 
+       atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
+       atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+
        spin_lock_irqsave(&fnic->fnic_lock, flags);
 
        /* fnic should be in FC_TRANS_ETH_MODE */
@@ -571,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
                         * reset the firmware. Free the cached flogi
                         */
                        fnic->state = FNIC_IN_FC_MODE;
+                       atomic64_inc(&reset_stats->fw_reset_failures);
                        ret = -1;
                }
        } else {
@@ -578,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
                              fnic->lport->host,
                              "Unexpected state %s while processing"
                              " reset cmpl\n", fnic_state_to_str(fnic->state));
+               atomic64_inc(&reset_stats->fw_reset_failures);
                ret = -1;
        }
 
@@ -701,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
        wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
        spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
 
+       fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
        if (is_ack_index_in_range(wq, request_out)) {
                fnic->fw_ack_index[0] = request_out;
                fnic->fw_ack_recd[0] = 1;
-       }
+       } else
+               atomic64_inc(
+                       &fnic->fnic_stats.misc_stats.ack_index_out_of_range);
+
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        FNIC_TRACE(fnic_fcpio_ack_handler,
                  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@@ -726,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        struct fcpio_icmnd_cmpl *icmnd_cmpl;
        struct fnic_io_req *io_req;
        struct scsi_cmnd *sc;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        u64 cmd_trace;
@@ -746,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        sc = scsi_host_find_tag(fnic->lport->host, id);
        WARN_ON_ONCE(!sc);
        if (!sc) {
+               atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "icmnd_cmpl sc is null - "
                          "hdr status = %s tag = 0x%x desc = 0x%p\n",
@@ -766,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
+               atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
                spin_unlock_irqrestore(io_lock, flags);
                shost_printk(KERN_ERR, fnic->lport->host,
@@ -824,31 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
                        xfer_len -= icmnd_cmpl->residual;
 
+               if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
+                       atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
                break;
 
        case FCPIO_TIMEOUT:          /* request was timed out */
+               atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
                sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
                break;
 
        case FCPIO_ABORTED:          /* request was aborted */
+               atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;
 
        case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
+               atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
                scsi_set_resid(sc, icmnd_cmpl->residual);
                sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
                break;
 
        case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
+               atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
                sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
                break;
-       case FCPIO_INVALID_HEADER:   /* header contains invalid data */
-       case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
-       case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
+
        case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
+               atomic64_inc(&fnic_stats->io_stats.io_not_found);
+               sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+               break;
+
        case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
-       case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
+               atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
+               sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+               break;
+
        case FCPIO_FW_ERR:           /* request was terminated due fw error */
+               atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
+               sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+               break;
+
+       case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
+               atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
+               sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
+               break;
+
+       case FCPIO_INVALID_HEADER:   /* header contains invalid data */
+       case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
+       case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
        default:
                shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
                             fnic_fcpio_status_to_str(hdr_status));
@@ -856,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
                break;
        }
 
+       if (hdr_status != FCPIO_SUCCESS) {
+               atomic64_inc(&fnic_stats->io_stats.io_failures);
+               shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+                            fnic_fcpio_status_to_str(hdr_status));
+       }
        /* Break link with the SCSI command */
        CMD_SP(sc) = NULL;
        CMD_FLAGS(sc) |= FNIC_IO_DONE;
@@ -889,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
        } else
                fnic->lport->host_stats.fcp_control_requests++;
 
+       atomic64_dec(&fnic_stats->io_stats.active_ios);
+       if (atomic64_read(&fnic->io_cmpl_skip))
+               atomic64_dec(&fnic->io_cmpl_skip);
+       else
+               atomic64_inc(&fnic_stats->io_stats.io_completions);
+
        /* Call SCSI completion function to complete the IO */
        if (sc->scsi_done)
                sc->scsi_done(sc);
@@ -906,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
        u32 id;
        struct scsi_cmnd *sc;
        struct fnic_io_req *io_req;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+       struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
+       struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
+       struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned long flags;
        spinlock_t *io_lock;
        unsigned long start_time;
@@ -923,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
        sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
        WARN_ON_ONCE(!sc);
        if (!sc) {
+               atomic64_inc(&fnic_stats->io_stats.sc_null);
                shost_printk(KERN_ERR, fnic->lport->host,
                          "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
                          fnic_fcpio_status_to_str(hdr_status), id);
@@ -933,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        WARN_ON_ONCE(!io_req);
        if (!io_req) {
+               atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                spin_unlock_irqrestore(io_lock, flags);
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                shost_printk(KERN_ERR, fnic->lport->host,
@@ -957,6 +1045,31 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                spin_unlock_irqrestore(io_lock, flags);
        } else if (id & FNIC_TAG_ABORT) {
                /* Completion of abort cmd */
+               switch (hdr_status) {
+               case FCPIO_SUCCESS:
+                       break;
+               case FCPIO_TIMEOUT:
+                       if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+                               atomic64_inc(&abts_stats->abort_fw_timeouts);
+                       else
+                               atomic64_inc(
+                                       &term_stats->terminate_fw_timeouts);
+                       break;
+               case FCPIO_IO_NOT_FOUND:
+                       if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+                               atomic64_inc(&abts_stats->abort_io_not_found);
+                       else
+                               atomic64_inc(
+                                       &term_stats->terminate_io_not_found);
+                       break;
+               default:
+                       if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+                               atomic64_inc(&abts_stats->abort_failures);
+                       else
+                               atomic64_inc(
+                                       &term_stats->terminate_failures);
+                       break;
+               }
                if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
                        /* This is a late completion. Ignore it */
                        spin_unlock_irqrestore(io_lock, flags);
@@ -964,6 +1077,16 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                }
                CMD_ABTS_STATUS(sc) = hdr_status;
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+
+               atomic64_dec(&fnic_stats->io_stats.active_ios);
+               if (atomic64_read(&fnic->io_cmpl_skip))
+                       atomic64_dec(&fnic->io_cmpl_skip);
+               else
+                       atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+               if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+                       atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
+
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "abts cmpl recd. id %d status %s\n",
                              (int)(id & FNIC_TAG_MASK),
@@ -1066,6 +1189,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
 {
        struct fnic *fnic = vnic_dev_priv(vdev);
 
+       switch (desc->hdr.type) {
+       case FCPIO_ICMND_CMPL: /* fw completed a command */
+       case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
+       case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
+       case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
+       case FCPIO_RESET_CMPL: /* fw completed reset */
+               atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+               break;
+       default:
+               break;
+       }
+
        switch (desc->hdr.type) {
        case FCPIO_ACK: /* fw copied copy wq desc to its queue */
                fnic_fcpio_ack_handler(fnic, cq_index, desc);
@@ -1126,6 +1261,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
        struct scsi_cmnd *sc;
        spinlock_t *io_lock;
        unsigned long start_time = 0;
+       struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 
        for (i = 0; i < fnic->fnic_max_tag_id; i++) {
                if (i == exclude_id)
@@ -1179,6 +1315,11 @@ cleanup_scsi_cmd:
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
                              " DID_TRANSPORT_DISRUPTED\n");
 
+               if (atomic64_read(&fnic->io_cmpl_skip))
+                       atomic64_dec(&fnic->io_cmpl_skip);
+               else
+                       atomic64_inc(&fnic_stats->io_stats.io_completions);
+
                /* Complete the command to SCSI */
                if (sc->scsi_done) {
                        FNIC_TRACE(fnic_cleanup_io,
@@ -1262,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
 {
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        struct Scsi_Host *host = fnic->lport->host;
+       struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        unsigned long flags;
 
        spin_lock_irqsave(host->host_lock, flags);
@@ -1283,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                atomic_dec(&fnic->in_flight);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "fnic_queue_abort_io_req: failure: no descriptors\n");
+               atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
                return 1;
        }
        fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
                                     0, task_req, tag, fc_lun, io_req->port_id,
                                     fnic->config.ra_tov, fnic->config.ed_tov);
 
+       atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+       if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+                 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+               atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+                 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        atomic_dec(&fnic->in_flight);
 
@@ -1299,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
 {
        int tag;
        int abt_tag;
+       int term_cnt = 0;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
        struct scsi_cmnd *sc;
+       struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
+       struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
        struct scsi_lun fc_lun;
        enum fnic_ioreq_state old_ioreq_state;
 
@@ -1366,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       atomic64_inc(&reset_stats->device_reset_terminates);
                        abt_tag = (tag | FNIC_TAG_DEV_RST);
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "fnic_rport_exch_reset dev rst sc 0x%p\n",
@@ -1402,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
                        else
                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
                        spin_unlock_irqrestore(io_lock, flags);
+                       atomic64_inc(&term_stats->terminates);
+                       term_cnt++;
                }
        }
+       if (term_cnt > atomic64_read(&term_stats->max_terminates))
+               atomic64_set(&term_stats->max_terminates, term_cnt);
 
 }
 
@@ -1411,6 +1568,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
 {
        int tag;
        int abt_tag;
+       int term_cnt = 0;
        struct fnic_io_req *io_req;
        spinlock_t *io_lock;
        unsigned long flags;
@@ -1420,6 +1578,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
        struct fc_lport *lport;
        struct fnic *fnic;
        struct fc_rport *cmd_rport;
+       struct reset_stats *reset_stats;
+       struct terminate_stats *term_stats;
        enum fnic_ioreq_state old_ioreq_state;
 
        if (!rport) {
@@ -1448,6 +1608,9 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
        if (fnic->in_remove)
                return;
 
+       reset_stats = &fnic->fnic_stats.reset_stats;
+       term_stats = &fnic->fnic_stats.term_stats;
+
        for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
                abt_tag = tag;
                io_lock = fnic_io_lock_tag(fnic, tag);
@@ -1504,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
                CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
                if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+                       atomic64_inc(&reset_stats->device_reset_terminates);
                        abt_tag = (tag | FNIC_TAG_DEV_RST);
                        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                        "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@@ -1540,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
                        else
                                CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
                        spin_unlock_irqrestore(io_lock, flags);
+                       atomic64_inc(&term_stats->terminates);
+                       term_cnt++;
                }
        }
+       if (term_cnt > atomic64_read(&term_stats->max_terminates))
+               atomic64_set(&term_stats->max_terminates, term_cnt);
 
 }
 
@@ -1562,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        int ret = SUCCESS;
        u32 task_req = 0;
        struct scsi_lun fc_lun;
+       struct fnic_stats *fnic_stats;
+       struct abort_stats *abts_stats;
+       struct terminate_stats *term_stats;
        int tag;
        DECLARE_COMPLETION_ONSTACK(tm_done);
 
@@ -1572,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        lp = shost_priv(sc->device->host);
 
        fnic = lport_priv(lp);
+       fnic_stats = &fnic->fnic_stats;
+       abts_stats = &fnic->fnic_stats.abts_stats;
+       term_stats = &fnic->fnic_stats.term_stats;
+
        rport = starget_to_rport(scsi_target(sc->device));
        tag = sc->request->tag;
        FNIC_SCSI_DBG(KERN_DEBUG,
@@ -1630,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
         */
        if (fc_remote_port_chkready(rport) == 0)
                task_req = FCPIO_ITMF_ABT_TASK;
-       else
+       else {
+               atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                task_req = FCPIO_ITMF_ABT_TASK_TERM;
+       }
 
        /* Now queue the abort command to firmware */
        int_to_scsilun(sc->device->lun, &fc_lun);
@@ -1646,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
                ret = FAILED;
                goto fnic_abort_cmd_end;
        }
-       if (task_req == FCPIO_ITMF_ABT_TASK)
+       if (task_req == FCPIO_ITMF_ABT_TASK) {
                CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
-       else
+               atomic64_inc(&fnic_stats->abts_stats.aborts);
+       } else {
                CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+               atomic64_inc(&fnic_stats->term_stats.terminates);
+       }
 
        /*
         * We queued an abort IO, wait for its completion.
@@ -1667,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
 
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
+               atomic64_inc(&fnic_stats->io_stats.ioreq_null);
                spin_unlock_irqrestore(io_lock, flags);
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
                ret = FAILED;
@@ -1677,6 +1858,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        /* fw did not complete abort, timed out */
        if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
                spin_unlock_irqrestore(io_lock, flags);
+               if (task_req == FCPIO_ITMF_ABT_TASK) {
+                       FNIC_SCSI_DBG(KERN_INFO,
+                               fnic->lport->host, "Abort Driver Timeout\n");
+                       atomic64_inc(&abts_stats->abort_drv_timeouts);
+               } else {
+                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+                               "Terminate Driver Timeout\n");
+                       atomic64_inc(&term_stats->terminate_drv_timeouts);
+               }
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
                ret = FAILED;
                goto fnic_abort_cmd_end;
@@ -1721,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
 {
        struct vnic_wq_copy *wq = &fnic->wq_copy[0];
        struct Scsi_Host *host = fnic->lport->host;
+       struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
        struct scsi_lun fc_lun;
        int ret = 0;
        unsigned long intr_flags;
@@ -1742,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
        if (!vnic_wq_copy_desc_avail(wq)) {
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                          "queue_dr_io_req failure - no descriptors\n");
+               atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
                ret = -EAGAIN;
                goto lr_io_req_end;
        }
@@ -1754,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                                     fc_lun.scsi_lun, io_req->port_id,
                                     fnic->config.ra_tov, fnic->config.ed_tov);
 
+       atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
+       if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
+                 atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
+               atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
+                 atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
+
 lr_io_req_end:
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
        atomic_dec(&fnic->in_flight);
@@ -1988,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        unsigned long flags;
        unsigned long start_time = 0;
        struct scsi_lun fc_lun;
+       struct fnic_stats *fnic_stats;
+       struct reset_stats *reset_stats;
        int tag = 0;
        DECLARE_COMPLETION_ONSTACK(tm_done);
        int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/
@@ -1999,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        lp = shost_priv(sc->device->host);
 
        fnic = lport_priv(lp);
+       fnic_stats = &fnic->fnic_stats;
+       reset_stats = &fnic->fnic_stats.reset_stats;
+
+       atomic64_inc(&reset_stats->device_resets);
 
        rport = starget_to_rport(scsi_target(sc->device));
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2009,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
                goto fnic_device_reset_end;
 
        /* Check if remote port up */
-       if (fc_remote_port_chkready(rport))
+       if (fc_remote_port_chkready(rport)) {
+               atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
                goto fnic_device_reset_end;
+       }
 
        CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
        /* Allocate tag if not present */
@@ -2086,6 +2292,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
         * gets cleaned up during higher levels of EH
         */
        if (status == FCPIO_INVALID_CODE) {
+               atomic64_inc(&reset_stats->device_reset_timeouts);
                FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                              "Device reset timed out\n");
                CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
@@ -2199,6 +2406,10 @@ fnic_device_reset_end:
                      "Returning from device reset %s\n",
                      (ret == SUCCESS) ?
                      "SUCCESS" : "FAILED");
+
+       if (ret == FAILED)
+               atomic64_inc(&reset_stats->device_reset_failures);
+
        return ret;
 }
 
@@ -2207,26 +2418,34 @@ int fnic_reset(struct Scsi_Host *shost)
 {
        struct fc_lport *lp;
        struct fnic *fnic;
-       int ret = SUCCESS;
+       int ret = 0;
+       struct reset_stats *reset_stats;
 
        lp = shost_priv(shost);
        fnic = lport_priv(lp);
+       reset_stats = &fnic->fnic_stats.reset_stats;
 
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "fnic_reset called\n");
 
+       atomic64_inc(&reset_stats->fnic_resets);
+
        /*
         * Reset local port, this will clean up libFC exchanges,
         * reset remote port sessions, and if link is up, begin flogi
         */
-       if (lp->tt.lport_reset(lp))
-               ret = FAILED;
+       ret = lp->tt.lport_reset(lp);
 
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Returning from fnic reset %s\n",
-                     (ret == SUCCESS) ?
+                     (ret == 0) ?
                      "SUCCESS" : "FAILED");
 
+       if (ret == 0)
+               atomic64_inc(&reset_stats->fnic_reset_completions);
+       else
+               atomic64_inc(&reset_stats->fnic_reset_failures);
+
        return ret;
 }
 
@@ -2251,7 +2470,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
         * scsi-ml tries to send a TUR to every device if host reset is
         * successful, so before returning to scsi, fabric should be up
         */
-       ret = fnic_reset(shost);
+       ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
        if (ret == SUCCESS) {
                wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
                ret = FAILED;
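
The abort and device-reset queueing hunks above both increment fw_stats.active_fw_reqs and then refresh fw_stats.max_fw_reqs with a read-compare-set sequence. A standalone sketch of that high-water-mark idiom is shown below; the helper name is illustrative only, and the read-then-set window means a concurrent updater can briefly under-record the maximum, which is acceptable for best-effort statistics.

#include <linux/atomic.h>

/* Sketch only: bump a running counter and track its observed maximum,
 * mirroring the active_fw_reqs/max_fw_reqs pattern in the hunks above. */
static inline void fnic_stats_inc_and_track_max(atomic64_t *active,
                                                atomic64_t *max)
{
        s64 cur;

        atomic64_inc(active);
        cur = atomic64_read(active);
        if (cur > atomic64_read(max))
                atomic64_set(max, cur); /* racy vs. other updaters; best effort */
}
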
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
new file mode 100644 (file)
index 0000000..540cceb
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _FNIC_STATS_H_
+#define _FNIC_STATS_H_
+struct io_path_stats {
+       atomic64_t active_ios;
+       atomic64_t max_active_ios;
+       atomic64_t io_completions;
+       atomic64_t io_failures;
+       atomic64_t ioreq_null;
+       atomic64_t alloc_failures;
+       atomic64_t sc_null;
+       atomic64_t io_not_found;
+       atomic64_t num_ios;
+};
+
+struct abort_stats {
+       atomic64_t aborts;
+       atomic64_t abort_failures;
+       atomic64_t abort_drv_timeouts;
+       atomic64_t abort_fw_timeouts;
+       atomic64_t abort_io_not_found;
+};
+
+struct terminate_stats {
+       atomic64_t terminates;
+       atomic64_t max_terminates;
+       atomic64_t terminate_drv_timeouts;
+       atomic64_t terminate_fw_timeouts;
+       atomic64_t terminate_io_not_found;
+       atomic64_t terminate_failures;
+};
+
+struct reset_stats {
+       atomic64_t device_resets;
+       atomic64_t device_reset_failures;
+       atomic64_t device_reset_aborts;
+       atomic64_t device_reset_timeouts;
+       atomic64_t device_reset_terminates;
+       atomic64_t fw_resets;
+       atomic64_t fw_reset_completions;
+       atomic64_t fw_reset_failures;
+       atomic64_t fnic_resets;
+       atomic64_t fnic_reset_completions;
+       atomic64_t fnic_reset_failures;
+};
+
+struct fw_stats {
+       atomic64_t active_fw_reqs;
+       atomic64_t max_fw_reqs;
+       atomic64_t fw_out_of_resources;
+       atomic64_t io_fw_errs;
+};
+
+struct vlan_stats {
+       atomic64_t vlan_disc_reqs;
+       atomic64_t resp_withno_vlanID;
+       atomic64_t sol_expiry_count;
+       atomic64_t flogi_rejects;
+};
+
+struct misc_stats {
+       u64 last_isr_time;
+       u64 last_ack_time;
+       atomic64_t isr_count;
+       atomic64_t max_cq_entries;
+       atomic64_t ack_index_out_of_range;
+       atomic64_t data_count_mismatch;
+       atomic64_t fcpio_timeout;
+       atomic64_t fcpio_aborted;
+       atomic64_t sgl_invalid;
+       atomic64_t mss_invalid;
+       atomic64_t abts_cpwq_alloc_failures;
+       atomic64_t devrst_cpwq_alloc_failures;
+       atomic64_t io_cpwq_alloc_failures;
+       atomic64_t no_icmnd_itmf_cmpls;
+       atomic64_t queue_fulls;
+       atomic64_t rport_not_ready;
+       atomic64_t frame_errors;
+};
+
+struct fnic_stats {
+       struct io_path_stats io_stats;
+       struct abort_stats abts_stats;
+       struct terminate_stats term_stats;
+       struct reset_stats reset_stats;
+       struct fw_stats fw_stats;
+       struct vlan_stats vlan_stats;
+       struct misc_stats misc_stats;
+};
+
+struct stats_debug_info {
+       char *debug_buffer;
+       void *i_private;
+       int buf_size;
+       int buffer_len;
+};
+
+int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
+int fnic_stats_debugfs_init(struct fnic *);
+void fnic_stats_debugfs_remove(struct fnic *);
+#endif /* _FNIC_STATS_H_ */
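
The stats_debug_info structure and the fnic_get_stats_data()/fnic_stats_debugfs_init() prototypes above are consumed by the debugfs code in fnic_debugfs.c (listed in this commit but not shown in full here). A minimal sketch of how such a read path could be wired follows; the handler names, the 3-page buffer size, and the use of vmalloc() are assumptions for illustration, not taken from this patch.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "fnic.h"       /* struct fnic, struct fnic_stats, struct stats_debug_info */

/* Hypothetical open handler: snapshot the counters once into a text buffer. */
static int fnic_stats_debugfs_open(struct inode *inode, struct file *file)
{
        struct fnic *fnic = inode->i_private;
        struct stats_debug_info *debug;

        debug = kzalloc(sizeof(*debug), GFP_KERNEL);
        if (!debug)
                return -ENOMEM;

        debug->buf_size = 3 * PAGE_SIZE;        /* assumed size */
        debug->debug_buffer = vmalloc(debug->buf_size);
        if (!debug->debug_buffer) {
                kfree(debug);
                return -ENOMEM;
        }

        debug->buffer_len = fnic_get_stats_data(debug, &fnic->fnic_stats);
        file->private_data = debug;
        return 0;
}

/* Hypothetical read handler: copy the prepared snapshot to user space. */
static ssize_t fnic_stats_debugfs_read(struct file *file, char __user *ubuf,
                                       size_t count, loff_t *ppos)
{
        struct stats_debug_info *debug = file->private_data;

        return simple_read_from_buffer(ubuf, count, ppos,
                                       debug->debug_buffer, debug->buffer_len);
}
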
index 23a60e3d8527295266f370d3c9dc242762aa508a..e002e7187dc0d14972b7146ce9ea15b1e5dbdd4e 100644 (file)
@@ -188,6 +188,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
        return len;
 }
 
+/*
+ * fnic_get_stats_data - Copy fnic stats buffer to a memory file
+ * @debug: pointer to the stats_debug_info buffer to dump the stats into
+ * @stats: pointer to the fnic_stats counters being reported
+ *
+ * Description:
+ * This routine gathers the fnic stats debugfs data from the fnic_stats struct
+ * and dumps it to stats_debug_info.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into
+ * stats_debug_info.
+ */
+int fnic_get_stats_data(struct stats_debug_info *debug,
+                       struct fnic_stats *stats)
+{
+       int len = 0;
+       int buf_size = debug->buf_size;
+       struct timespec val1, val2;
+
+       len = snprintf(debug->debug_buffer + len, buf_size - len,
+                 "------------------------------------------\n"
+                 "\t\tIO Statistics\n"
+                 "------------------------------------------\n");
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
+                 "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
+                 "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
+                 "Number of Memory alloc Failures: %lld\n"
+                 "Number of IOREQ Null: %lld\n"
+                 "Number of SCSI cmd pointer Null: %lld\n",
+                 (u64)atomic64_read(&stats->io_stats.active_ios),
+                 (u64)atomic64_read(&stats->io_stats.max_active_ios),
+                 (u64)atomic64_read(&stats->io_stats.num_ios),
+                 (u64)atomic64_read(&stats->io_stats.io_completions),
+                 (u64)atomic64_read(&stats->io_stats.io_failures),
+                 (u64)atomic64_read(&stats->io_stats.io_not_found),
+                 (u64)atomic64_read(&stats->io_stats.alloc_failures),
+                 (u64)atomic64_read(&stats->io_stats.ioreq_null),
+                 (u64)atomic64_read(&stats->io_stats.sc_null));
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "\n------------------------------------------\n"
+                 "\t\tAbort Statistics\n"
+                 "------------------------------------------\n");
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Number of Aborts: %lld\n"
+                 "Number of Abort Failures: %lld\n"
+                 "Number of Abort Driver Timeouts: %lld\n"
+                 "Number of Abort FW Timeouts: %lld\n"
+                 "Number of Abort IO NOT Found: %lld\n",
+                 (u64)atomic64_read(&stats->abts_stats.aborts),
+                 (u64)atomic64_read(&stats->abts_stats.abort_failures),
+                 (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
+                 (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
+                 (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "\n------------------------------------------\n"
+                 "\t\tTerminate Statistics\n"
+                 "------------------------------------------\n");
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Number of Terminates: %lld\n"
+                 "Maximum Terminates: %lld\n"
+                 "Number of Terminate Driver Timeouts: %lld\n"
+                 "Number of Terminate FW Timeouts: %lld\n"
+                 "Number of Terminate IO NOT Found: %lld\n"
+                 "Number of Terminate Failures: %lld\n",
+                 (u64)atomic64_read(&stats->term_stats.terminates),
+                 (u64)atomic64_read(&stats->term_stats.max_terminates),
+                 (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
+                 (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
+                 (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
+                 (u64)atomic64_read(&stats->term_stats.terminate_failures));
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "\n------------------------------------------\n"
+                 "\t\tReset Statistics\n"
+                 "------------------------------------------\n");
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Number of Device Resets: %lld\n"
+                 "Number of Device Reset Failures: %lld\n"
+                 "Number of Device Reset Aborts: %lld\n"
+                 "Number of Device Reset Timeouts: %lld\n"
+                 "Number of Device Reset Terminates: %lld\n"
+                 "Number of FW Resets: %lld\n"
+                 "Number of FW Reset Completions: %lld\n"
+                 "Number of FW Reset Failures: %lld\n"
+                 "Number of Fnic Reset: %lld\n"
+                 "Number of Fnic Reset Completions: %lld\n"
+                 "Number of Fnic Reset Failures: %lld\n",
+                 (u64)atomic64_read(&stats->reset_stats.device_resets),
+                 (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
+                 (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
+                 (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
+                 (u64)atomic64_read(
+                         &stats->reset_stats.device_reset_terminates),
+                 (u64)atomic64_read(&stats->reset_stats.fw_resets),
+                 (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
+                 (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
+                 (u64)atomic64_read(&stats->reset_stats.fnic_resets),
+                 (u64)atomic64_read(
+                         &stats->reset_stats.fnic_reset_completions),
+                 (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "\n------------------------------------------\n"
+                 "\t\tFirmware Statistics\n"
+                 "------------------------------------------\n");
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Number of Active FW Requests %lld\n"
+                 "Maximum FW Requests: %lld\n"
+                 "Number of FW out of resources: %lld\n"
+                 "Number of FW IO errors: %lld\n",
+                 (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
+                 (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
+                 (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
+                 (u64)atomic64_read(&stats->fw_stats.io_fw_errs));
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "\n------------------------------------------\n"
+                 "\t\tVlan Discovery Statistics\n"
+                 "------------------------------------------\n");
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Number of Vlan Discovery Requests Sent %lld\n"
+                 "Vlan Response Received with no FCF VLAN ID: %lld\n"
+                 "No solicitations recvd after vlan set, expiry count: %lld\n"
+                 "Flogi rejects count: %lld\n",
+                 (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
+                 (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
+                 (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
+                 (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "\n------------------------------------------\n"
+                 "\t\tOther Important Statistics\n"
+                 "------------------------------------------\n");
+
+       jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
+       jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+
+       len += snprintf(debug->debug_buffer + len, buf_size - len,
+                 "Last ISR time: %llu (%8lu.%8lu)\n"
+                 "Last ACK time: %llu (%8lu.%8lu)\n"
+                 "Number of ISRs: %lld\n"
+                 "Maximum CQ Entries: %lld\n"
+                 "Number of ACK index out of range: %lld\n"
+                 "Number of data count mismatch: %lld\n"
+                 "Number of FCPIO Timeouts: %lld\n"
+                 "Number of FCPIO Aborted: %lld\n"
+                 "Number of SGL Invalid: %lld\n"
+                 "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
+                 "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
+                 "Number of Copy WQ Alloc Failures for IOs: %lld\n"
+                 "Number of no icmnd itmf Completions: %lld\n"
+                 "Number of QUEUE Fulls: %lld\n"
+                 "Number of rport not ready: %lld\n"
+                 "Number of receive frame errors: %lld\n",
+                 (u64)stats->misc_stats.last_isr_time,
+                 val1.tv_sec, val1.tv_nsec,
+                 (u64)stats->misc_stats.last_ack_time,
+                 val2.tv_sec, val2.tv_nsec,
+                 (u64)atomic64_read(&stats->misc_stats.isr_count),
+                 (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
+                 (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
+                 (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
+                 (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
+                 (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
+                 (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
+                 (u64)atomic64_read(
+                         &stats->misc_stats.abts_cpwq_alloc_failures),
+                 (u64)atomic64_read(
+                         &stats->misc_stats.devrst_cpwq_alloc_failures),
+                 (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
+                 (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
+                 (u64)atomic64_read(&stats->misc_stats.queue_fulls),
+                 (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
+                 (u64)atomic64_read(&stats->misc_stats.frame_errors));
+
+       return len;
+
+}
+
 /*
  * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
  *
index cef42b4c4d6ca2c6b73bb81e3f1a8d1f73888d30..d412f2ee3c4fec83ee58b52ec5b7370658e39fdf 100644 (file)
@@ -84,7 +84,8 @@ fnic_trace_data_t *fnic_trace_get_buf(void);
 int fnic_get_trace_data(fnic_dbgfs_t *);
 int fnic_trace_buf_init(void);
 void fnic_trace_free(void);
+int fnic_debugfs_init(void);
+void fnic_debugfs_terminate(void);
 int fnic_trace_debugfs_init(void);
 void fnic_trace_debugfs_terminate(void);
-
 #endif
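
fnic_debugfs_init()/fnic_debugfs_terminate() are added here alongside the existing per-feature trace debugfs hooks; presumably they set up and tear down a common top-level debugfs directory that the trace and stats entries are created under. A hedged sketch under that assumption (the directory name and the static dentry below are illustrative, not taken from this patch):

#include <linux/debugfs.h>
#include <linux/errno.h>

static struct dentry *fnic_debugfs_root;        /* assumed shared parent dir */

int fnic_debugfs_init(void)
{
        fnic_debugfs_root = debugfs_create_dir("fnic", NULL);
        return fnic_debugfs_root ? 0 : -ENOMEM;
}

void fnic_debugfs_terminate(void)
{
        debugfs_remove_recursive(fnic_debugfs_root);
        fnic_debugfs_root = NULL;
}
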
index df72d4a58385f2c7f4e83b83d8cac84264e97cea..22f6432eb4755a20af732e8c4d4060e9fbb9e909 100644 (file)
@@ -100,7 +100,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
-       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
index 9e2588a6881cb72751ece0f0d2092f2e6e214353..add6d1566ec8331aa5e27963e13fefa74a117ffd 100644 (file)
@@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
        struct iscsi_conn *conn = sk->sk_user_data;
 
        if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
+           (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
            !atomic_read(&sk->sk_rmem_alloc)) {
                ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
                iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
index b92aec989d60fed0e78c7ad61723529d8b0943da..82134d20e2d88ac4b9092236eb26556869270c8f 100644 (file)
@@ -2629,7 +2629,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
                                rspiocbq,
                                (phba->fc_ratov * 2)
                                + LPFC_DRVR_TIMEOUT);
-       if (iocb_stat) {
+       if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
                ret_val = -EIO;
                goto err_get_xri_exit;
        }
@@ -3204,8 +3204,9 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
                                             rspiocbq, (phba->fc_ratov * 2) +
                                             LPFC_DRVR_TIMEOUT);
 
-       if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
-                                          (rsp->ulpStatus != IOCB_SUCCESS))) {
+       if ((iocb_stat != IOCB_SUCCESS) ||
+           ((phba->sli_rev < LPFC_SLI_REV4) &&
+            (rsp->ulpStatus != IOSTAT_SUCCESS))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
                                "3126 Failed loopback test issue iocb: "
                                "iocb_stat:x%x\n", iocb_stat);
index 7801601aa5d9e1977904124b2e9220cda8d66465..883ea2d9f237028e60dedf2e9bd828fca84bd63b 100644 (file)
@@ -4171,8 +4171,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        NLP_INT_NODE_ACT(ndlp);
        atomic_set(&ndlp->cmd_pending, 0);
        ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
-       if (vport->phba->sli_rev == LPFC_SLI_REV4)
-               ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
 }
 
 struct lpfc_nodelist *
@@ -4217,6 +4215,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_initialize_node(vport, ndlp, did);
 
        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+       if (vport->phba->sli_rev == LPFC_SLI_REV4)
+               ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+
 
        if (state != NLP_STE_UNUSED_NODE)
                lpfc_nlp_set_state(vport, ndlp, state);
@@ -5617,6 +5618,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        lpfc_initialize_node(vport, ndlp, did);
        INIT_LIST_HEAD(&ndlp->nlp_listp);
+       if (vport->phba->sli_rev == LPFC_SLI_REV4)
+               ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
+
 
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node init:       did:x%x",
index 086c3f28caa68351017586d10953db49d5d640dc..5464b116d3287b77796e580a62e3ecbc7243167d 100644 (file)
@@ -3439,7 +3439,8 @@ struct els_request64_wqe {
 #define els_req64_hopcnt_SHIFT      24
 #define els_req64_hopcnt_MASK       0x000000ff
 #define els_req64_hopcnt_WORD       word13
-       uint32_t reserved[2];
+       uint32_t word14;
+       uint32_t max_response_payload_len;
 };
 
 struct xmit_els_rsp64_wqe {
@@ -3554,7 +3555,8 @@ struct gen_req64_wqe {
        uint32_t relative_offset;
        struct wqe_rctl_dfctl wge_ctl; /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
-       uint32_t rsvd_12_15[4];
+       uint32_t rsvd_12_14[3];
+       uint32_t max_response_payload_len;
 };
 
 struct create_xri_wqe {
@@ -3584,7 +3586,13 @@ struct abort_cmd_wqe {
 
 struct fcp_iwrite64_wqe {
        struct ulp_bde64 bde;
-       uint32_t payload_offset_len;
+       uint32_t word3;
+#define        cmd_buff_len_SHIFT  16
+#define        cmd_buff_len_MASK  0x00000ffff
+#define        cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
        uint32_t total_xfer_len;
        uint32_t initial_xfer_len;
        struct wqe_common wqe_com;     /* words 6-11 */
@@ -3594,7 +3602,13 @@ struct fcp_iwrite64_wqe {
 
 struct fcp_iread64_wqe {
        struct ulp_bde64 bde;
-       uint32_t payload_offset_len;   /* word 3 */
+       uint32_t word3;
+#define        cmd_buff_len_SHIFT  16
+#define        cmd_buff_len_MASK  0x00000ffff
+#define        cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
        uint32_t total_xfer_len;       /* word 4 */
        uint32_t rsrvd5;               /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
@@ -3604,7 +3618,13 @@ struct fcp_iread64_wqe {
 
 struct fcp_icmnd64_wqe {
        struct ulp_bde64 bde;          /* words 0-2 */
-       uint32_t rsrvd3;               /* word 3 */
+       uint32_t word3;
+#define        cmd_buff_len_SHIFT  16
+#define        cmd_buff_len_MASK  0x00000ffff
+#define        cmd_buff_len_WORD  word3
+#define payload_offset_len_SHIFT 0
+#define payload_offset_len_MASK 0x0000ffff
+#define payload_offset_len_WORD word3
        uint32_t rsrvd4;               /* word 4 */
        uint32_t rsrvd5;               /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
index 3dfd38ce806c0df6f8196814886bd836b0dc18f6..68c94cc85c35da9dd6e4ebab0052867f5c07c01f 100644 (file)
@@ -4545,7 +4545,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
        pci_save_state(pdev);
 
        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
-       if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
+       if (pci_is_pcie(pdev))
                pdev->needs_freset = 1;
 
        return 0;
index c913e8cc3b269ca82096efd6e356dc8e9e635de9..b2ede05a5f0a61bbdeb3ffea4d3d7c263262031d 100644 (file)
@@ -1012,20 +1012,25 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                        break;
                }
 
-               /* Allocate iotag for psb->cur_iocbq. */
-               iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
-               if (iotag == 0) {
+
+               lxri = lpfc_sli4_next_xritag(phba);
+               if (lxri == NO_XRI) {
                        pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-                               psb->data, psb->dma_handle);
+                             psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                }
 
-               lxri = lpfc_sli4_next_xritag(phba);
-               if (lxri == NO_XRI) {
+               /* Allocate iotag for psb->cur_iocbq. */
+               iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+               if (iotag == 0) {
                        pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-                             psb->data, psb->dma_handle);
+                               psb->data, psb->dma_handle);
                        kfree(psb);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                                       "3368 Failed to allocate IOTAG for"
+                                       " XRI:0x%x\n", lxri);
+                       lpfc_sli4_free_xri(phba, lxri);
                        break;
                }
                psb->cur_iocbq.sli4_lxritag = lxri;
@@ -4485,9 +4490,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
                piocb->ulpContext =
                  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
        }
-       if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
-               piocb->ulpFCP2Rcvy = 1;
-       }
+       piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
 
        /* ulpTimeout is only one byte */
@@ -4981,6 +4984,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
        }
 }
 
+
+/**
+ * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ *
+ * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
+ *
+ * Return code :
+ *   0x2003 - Error
+ *   0x2002 - Success
+ **/
+static int
+lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
+       uint32_t rsp_info;
+       uint32_t rsp_len;
+       uint8_t  rsp_info_code;
+       int ret = FAILED;
+
+
+       if (fcprsp == NULL)
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                                "0703 fcp_rsp is missing\n");
+       else {
+               rsp_info = fcprsp->rspStatus2;
+               rsp_len = be32_to_cpu(fcprsp->rspRspLen);
+               rsp_info_code = fcprsp->rspInfo3;
+
+
+               lpfc_printf_vlog(vport, KERN_INFO,
+                                LOG_FCP,
+                                "0706 fcp_rsp valid 0x%x,"
+                                " rsp len=%d code 0x%x\n",
+                                rsp_info,
+                                rsp_len, rsp_info_code);
+
+               if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
+                       switch (rsp_info_code) {
+                       case RSP_NO_FAILURE:
+                               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                                                "0715 Task Mgmt No Failure\n");
+                               ret = SUCCESS;
+                               break;
+                       case RSP_TM_NOT_SUPPORTED: /* TM rejected */
+                               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                                                "0716 Task Mgmt Target "
+                                               "reject\n");
+                               break;
+                       case RSP_TM_NOT_COMPLETED: /* TM failed */
+                               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                                                "0717 Task Mgmt Target "
+                                               "failed TM\n");
+                               break;
+                       case RSP_TM_INVALID_LU: /* TM to invalid LU! */
+                               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                                                "0718 Task Mgmt to invalid "
+                                               "LUN\n");
+                               break;
+                       }
+               }
+       }
+       return ret;
+}
+
+
 /**
  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
  * @vport: The virtual port for which this call is being executed.
@@ -5042,12 +5112,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 
        status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
                                          iocbq, iocbqrsp, lpfc_cmd->timeout);
-       if (status != IOCB_SUCCESS) {
-               if (status == IOCB_TIMEDOUT) {
-                       ret = TIMEOUT_ERROR;
-               } else
-                       ret = FAILED;
-               lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+       if ((status != IOCB_SUCCESS) ||
+           (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                         "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
                         "iocb_flag x%x\n",
@@ -5055,9 +5121,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
                         tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
                         iocbqrsp->iocb.un.ulpWord[4],
                         iocbq->iocb_flag);
-       } else if (status == IOCB_BUSY)
-               ret = FAILED;
-       else
+               /* if status == IOCB_SUCCESS here, then ulpStatus != IOSTAT_SUCCESS */
+               if (status == IOCB_SUCCESS) {
+                       if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+                               /* Something in the FCP_RSP was invalid.
+                                * Check conditions */
+                               ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
+                       else
+                               ret = FAILED;
+               } else if (status == IOCB_TIMEDOUT) {
+                       ret = TIMEOUT_ERROR;
+               } else {
+                       ret = FAILED;
+               }
+               lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
+       } else
                ret = SUCCESS;
 
        lpfc_sli_release_iocbq(phba, iocbqrsp);
@@ -5181,7 +5259,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
        unsigned tgt_id = cmnd->device->id;
        unsigned int lun_id = cmnd->device->lun;
        struct lpfc_scsi_event_header scsi_event;
-       int status, ret = SUCCESS;
+       int status;
 
        if (!rdata) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5222,9 +5300,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
         * So, continue on.
         * We will report success if all the i/o aborts successfully.
         */
-       ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+       if (status == SUCCESS)
+               status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
                                                LPFC_CTX_LUN);
-       return ret;
+
+       return status;
 }
 
 /**
@@ -5248,7 +5328,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
        unsigned tgt_id = cmnd->device->id;
        unsigned int lun_id = cmnd->device->lun;
        struct lpfc_scsi_event_header scsi_event;
-       int status, ret = SUCCESS;
+       int status;
 
        if (!rdata) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5289,9 +5369,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
         * So, continue on.
         * We will report success if all the i/o aborts successfully.
         */
-       ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+       if (status == SUCCESS)
+               status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
                                          LPFC_CTX_TGT);
-       return ret;
+       return status;
 }
 
 /**
index b1d9f7fcb911b31a2a7fc609e97e901174a753bb..852ff7def4938c4078b961f8f6d883542fcaea2c 100644 (file)
@@ -73,6 +73,7 @@ struct fcp_rsp {
 #define RSP_RO_MISMATCH_ERR  0x03
 #define RSP_TM_NOT_SUPPORTED 0x04      /* Task mgmt function not supported */
 #define RSP_TM_NOT_COMPLETED 0x05      /* Task mgmt function not performed */
+#define RSP_TM_INVALID_LU    0x09      /* Task mgmt function to invalid LU */
 
        uint32_t rspInfoRsvd;   /* FCP_RSP_INFO bytes 4-7 (reserved) */
 
index 612f48973ff2c2c7c7f5e7f571a42a0b74b1f30f..8f580fda443f9de493544ac5f75ac381f5990db9 100644 (file)
@@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
                        uint32_t);
+static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
+static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -6566,6 +6568,108 @@ lpfc_mbox_timeout(unsigned long ptr)
        return;
 }
 
+/**
+ * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
+ *                                    are pending
+ * @phba: Pointer to HBA context object.
+ *
+ * This function checks if any mailbox completions are present on the mailbox
+ * completion queue.
+ **/
+bool
+lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
+{
+
+       uint32_t idx;
+       struct lpfc_queue *mcq;
+       struct lpfc_mcqe *mcqe;
+       bool pending_completions = false;
+
+       if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+               return false;
+
+       /* Check for completions on mailbox completion queue */
+
+       mcq = phba->sli4_hba.mbx_cq;
+       idx = mcq->hba_index;
+       while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
+               mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+               if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
+                   (!bf_get_le32(lpfc_trailer_async, mcqe))) {
+                       pending_completions = true;
+                       break;
+               }
+               idx = (idx + 1) % mcq->entry_count;
+               if (mcq->hba_index == idx)
+                       break;
+       }
+       return pending_completions;
+
+}
+
+/**
+ * lpfc_sli4_process_missed_mbox_completions - process mbox completions
+ *                                           that were missed.
+ * @phba: Pointer to HBA context object.
+ *
+ * For sli4, it is possible to miss an interrupt. As such, mbox completions
+ * may be missed, causing erroneous mailbox timeouts to occur. This function
+ * checks to see if mbox completions are on the mailbox completion queue
+ * and will process all the completions associated with the eq for the
+ * mailbox completion queue.
+ **/
+bool
+lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
+{
+
+       uint32_t eqidx;
+       struct lpfc_queue *fpeq = NULL;
+       struct lpfc_eqe *eqe;
+       bool mbox_pending;
+
+       if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
+               return false;
+
+       /* Find the eq associated with the mcq */
+
+       if (phba->sli4_hba.hba_eq)
+               for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+                       if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
+                           phba->sli4_hba.mbx_cq->assoc_qid) {
+                               fpeq = phba->sli4_hba.hba_eq[eqidx];
+                               break;
+                       }
+       if (!fpeq)
+               return false;
+
+       /* Turn off interrupts from this EQ */
+
+       lpfc_sli4_eq_clr_intr(fpeq);
+
+       /* Check to see if a mbox completion is pending */
+
+       mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
+
+       /*
+        * If a mbox completion is pending, process all the events on EQ
+        * associated with the mbox completion queue (this could include
+        * mailbox commands, async events, els commands, receive queue data
+        * and fcp commands)
+        */
+
+       if (mbox_pending)
+               while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+                       lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+                       fpeq->EQ_processed++;
+               }
+
+       /* Always clear and re-arm the EQ */
+
+       lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+       return mbox_pending;
+
+}
 
 /**
  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@@ -6583,6 +6687,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
 
+       /* If the mailbox completed, process the completion and return */
+       if (lpfc_sli4_process_missed_mbox_completions(phba))
+               return;
+
        /* Check the pmbox pointer first.  There is a race condition
         * between the mbox timeout handler getting executed in the
         * worklist and the mailbox actually completing. When this
@@ -7077,6 +7185,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
                                                1000) + jiffies;
        spin_unlock_irq(&phba->hbalock);
 
+       /* Make sure the mailbox is really active */
+       if (timeout)
+               lpfc_sli4_process_missed_mbox_completions(phba);
+
        /* Wait for the outstanding mailbox command to complete */
        while (phba->sli.mbox_active) {
                /* Check active mailbox complete status every 2ms */
@@ -8076,6 +8188,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+               wqe->els_req.max_response_payload_len = total_len - xmit_len;
                break;
        case CMD_XMIT_SEQUENCE64_CX:
                bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@@ -8120,8 +8233,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                command_type = FCP_COMMAND_DATA_OUT;
                /* word3 iocb=iotag wqe=payload_offset_len */
                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
-               wqe->fcp_iwrite.payload_offset_len =
-                       xmit_len + sizeof(struct fcp_rsp);
+               bf_set(payload_offset_len, &wqe->fcp_iwrite,
+                      xmit_len + sizeof(struct fcp_rsp));
+               bf_set(cmd_buff_len, &wqe->fcp_iwrite,
+                      0);
                /* word4 iocb=parameter wqe=total_xfer_length memcpy */
                /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
                bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@@ -8139,8 +8254,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
        case CMD_FCP_IREAD64_CR:
                /* word3 iocb=iotag wqe=payload_offset_len */
                /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
-               wqe->fcp_iread.payload_offset_len =
-                       xmit_len + sizeof(struct fcp_rsp);
+               bf_set(payload_offset_len, &wqe->fcp_iread,
+                      xmit_len + sizeof(struct fcp_rsp));
+               bf_set(cmd_buff_len, &wqe->fcp_iread,
+                      0);
                /* word4 iocb=parameter wqe=total_xfer_length memcpy */
                /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
                bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@@ -8156,8 +8273,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
                break;
        case CMD_FCP_ICMND64_CR:
+               /* word3 iocb=iotag wqe=payload_offset_len */
+               /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+               bf_set(payload_offset_len, &wqe->fcp_icmd,
+                      xmit_len + sizeof(struct fcp_rsp));
+               bf_set(cmd_buff_len, &wqe->fcp_icmd,
+                      0);
                /* word3 iocb=IO_TAG wqe=reserved */
-               wqe->fcp_icmd.rsrvd3 = 0;
                bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
                /* Always open the exchange */
                bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@@ -8203,6 +8325,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
                bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
                bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+               wqe->gen_req.max_response_payload_len = total_len - xmit_len;
                command_type = OTHER_COMMAND;
                break;
        case CMD_XMIT_ELS_RSP64_CX:
@@ -10073,6 +10196,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
                if (iocb_completed) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "0331 IOCB wake signaled\n");
+                       /* Note: we are not indicating if the IOCB has a success
+                        * status or not - that's for the caller to check.
+                        * IOCB_SUCCESS means just that the command was sent and
+                        * completed. Not that it completed successfully.
+                        */
                } else if (timeleft == 0) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0338 IOCB wait timeout error - no "
@@ -11074,8 +11202,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
                              struct lpfc_iocbq *pIocbOut,
                              struct lpfc_wcqe_complete *wcqe)
 {
+       int numBdes, i;
        unsigned long iflags;
-       uint32_t status;
+       uint32_t status, max_response;
+       struct lpfc_dmabuf *dmabuf;
+       struct ulp_bde64 *bpl, bde;
        size_t offset = offsetof(struct lpfc_iocbq, iocb);
 
        memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -11092,7 +11223,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
                        pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
        else {
                pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
-               pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+               switch (pIocbOut->iocb.ulpCommand) {
+               case CMD_ELS_REQUEST64_CR:
+                       dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+                       bpl  = (struct ulp_bde64 *)dmabuf->virt;
+                       bde.tus.w = le32_to_cpu(bpl[1].tus.w);
+                       max_response = bde.tus.f.bdeSize;
+                       break;
+               case CMD_GEN_REQUEST64_CR:
+                       max_response = 0;
+                       if (!pIocbOut->context3)
+                               break;
+                       numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
+                                       sizeof(struct ulp_bde64);
+                       dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
+                       bpl = (struct ulp_bde64 *)dmabuf->virt;
+                       for (i = 0; i < numBdes; i++) {
+                               bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+                               if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+                                       max_response += bde.tus.f.bdeSize;
+                       }
+                       break;
+               default:
+                       max_response = wcqe->total_data_placed;
+                       break;
+               }
+               if (max_response < wcqe->total_data_placed)
+                       pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
+               else
+                       pIocbIn->iocb.un.genreq64.bdl.bdeSize =
+                               wcqe->total_data_placed;
        }
 
        /* Convert BG errors for completion status */
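The switch added above derives the largest response the posted buffers can actually hold: the size of the single response BDE for an ELS request, or the sum of the BDE sizes not flagged BUFF_TYPE_BDE_64 for a GEN request, and then clamps the firmware's total_data_placed to that bound before storing it in bdeSize. A minimal sketch of the same clamping idea, with a hypothetical sizes[] array standing in for the driver's BDE list:

#include <linux/types.h>

/* Hypothetical sketch: bound a firmware-reported length by the total
 * capacity of the receive buffers that were actually posted. */
static u32 clamp_to_posted_buffers(const u32 *sizes, int count, u32 reported)
{
	u32 capacity = 0;
	int i;

	for (i = 0; i < count; i++)
		capacity += sizes[i];
	return reported < capacity ? reported : capacity;
}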
@@ -15098,6 +15258,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
        uint16_t max_rpi, rpi_limit;
        uint16_t rpi_remaining, lrpi = 0;
        struct lpfc_rpi_hdr *rpi_hdr;
+       unsigned long iflag;
 
        max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
        rpi_limit = phba->sli4_hba.next_rpi;
@@ -15106,7 +15267,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
         * Fetch the next logical rpi.  Because this index is logical,
         * the  driver starts at 0 each time.
         */
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irqsave(&phba->hbalock, iflag);
        rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
        if (rpi >= rpi_limit)
                rpi = LPFC_RPI_ALLOC_ERROR;
@@ -15122,7 +15283,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
         */
        if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
            (phba->sli4_hba.rpi_count >= max_rpi)) {
-               spin_unlock_irq(&phba->hbalock);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
                return rpi;
        }
 
@@ -15131,7 +15292,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
         * extents.
         */
        if (!phba->sli4_hba.rpi_hdrs_in_use) {
-               spin_unlock_irq(&phba->hbalock);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
                return rpi;
        }
 
@@ -15142,7 +15303,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
         * how many are supported max by the device.
         */
        rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
        if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
                rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
                if (!rpi_hdr) {
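The spin_lock_irq() to spin_lock_irqsave() conversion above matters because lpfc_sli4_alloc_rpi() can be reached from contexts that already run with interrupts disabled; the plain _irq variants would unconditionally re-enable interrupts on unlock. A kernel-style sketch of the pattern, with a hypothetical bitmap ID allocator in place of the lpfc RPI bookkeeping:

#include <linux/spinlock.h>
#include <linux/bitops.h>

/* Hypothetical ID allocator: irqsave/irqrestore preserve whatever
 * interrupt state the caller already had, so the helper is safe whether
 * or not interrupts were disabled on entry. */
static int alloc_id(spinlock_t *lock, unsigned long *bitmap, unsigned int limit)
{
	unsigned long flags;
	unsigned int id;

	spin_lock_irqsave(lock, flags);
	id = find_next_zero_bit(bitmap, limit, 0);
	if (id < limit)
		set_bit(id, bitmap);
	spin_unlock_irqrestore(lock, flags);
	return id < limit ? (int)id : -1;
}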
index 85120b77aa0e99aea4621c2204342a47e11ce5ef..298c8cd1a89d9a2d5cd6b94d92bf08b5cd10de02 100644 (file)
@@ -673,6 +673,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *);
 int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
 int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+void lpfc_sli4_free_xri(struct lpfc_hba *, int);
 int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
 int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
 struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
index f58f18342bc3e9b001cd9156a64cab7ddf344695..e3094c4e143b4b5caee4e10e4f008edc3730574b 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.42"
+#define LPFC_DRIVER_VERSION "8.3.43"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index a6efc1e088ec322b0d9cbe9963813608c3438be1..84cfe0f62b25f6ad5d6058f3da8730793e110bea 100644 (file)
@@ -3199,11 +3199,11 @@ megasas_get_pd_list(struct megasas_instance *instance)
 
                for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
 
-                       instance->pd_list[pd_addr->deviceId].tid        =
+                       instance->pd_list[le16_to_cpu(pd_addr->deviceId)].tid   =
                                le16_to_cpu(pd_addr->deviceId);
-                       instance->pd_list[pd_addr->deviceId].driveType  =
+                       instance->pd_list[le16_to_cpu(pd_addr->deviceId)].driveType     =
                                                        pd_addr->scsiDevType;
-                       instance->pd_list[pd_addr->deviceId].driveState =
+                       instance->pd_list[le16_to_cpu(pd_addr->deviceId)].driveState    =
                                                        MR_PD_STATE_SYSTEM;
                        pd_addr++;
                }
@@ -3998,7 +3998,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
                 * values
                 */
                if ((prev_aen.members.class <= curr_aen.members.class) &&
-                   !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^
+                   !((prev_aen.members.locale & curr_aen.members.locale) ^
                      curr_aen.members.locale)) {
                        /*
                         * Previously issued event registration includes
@@ -4006,7 +4006,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
                         */
                        return 0;
                } else {
-                       curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale);
+                       curr_aen.members.locale |= prev_aen.members.locale;
 
                        if (prev_aen.members.class < curr_aen.members.class)
                                curr_aen.members.class = prev_aen.members.class;
@@ -4097,7 +4097,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
        class_locale.members.class = MR_EVT_CLASS_DEBUG;
 
        return megasas_register_aen(instance,
-                       le32_to_cpu(eli.newest_seq_num) + 1,
+                       eli.newest_seq_num + 1,
                        class_locale.word);
 }
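The pd_list hunk above converts the firmware-supplied deviceId from little-endian exactly once before using it as an array index; without le16_to_cpu() a big-endian host would index the wrong slot. The locale hunks go the other way: prev_aen.members.locale is already in host order, so the stray swaps are dropped. A short sketch of the indexing rule, with a hypothetical wire-format entry:

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_pd_entry {		/* hypothetical wire-format entry */
	__le16 device_id;
	u8 scsi_dev_type;
};

static void record_pd(u8 *drive_type_by_id, const struct fw_pd_entry *e)
{
	u16 id = le16_to_cpu(e->device_id);	/* convert once, then index */

	drive_type_by_id[id] = e->scsi_dev_type;
}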
 
index d99f41c2ca13b927c66d34036937d3443b29f862..a04b4ff8c7f627193c6829e2386c7c8e167632ee 100644 (file)
@@ -308,6 +308,117 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
        return str - buf;
 }
 static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
+/**
+ * pm8001_ctl_ib_queue_log_show - Inbound queue event log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
+       struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+       int offset;
+       char *str = buf;
+       int start = 0;
+#define IB_MEMMAP(c)           \
+               (*(u32 *)((u8 *)pm8001_ha->             \
+               memoryMap.region[IB].virt_ptr +         \
+               pm8001_ha->evtlog_ib_offset + (c)))
+
+       for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+               /* same read for SPC and SPCv/e controllers */
+               str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
+               start = start + 4;
+       }
+       pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
+       if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+               && (pm8001_ha->chip_id != chip_8001))
+               pm8001_ha->evtlog_ib_offset = 0;
+       if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+               && (pm8001_ha->chip_id == chip_8001))
+               pm8001_ha->evtlog_ib_offset = 0;
+
+       return str - buf;
+}
+
+static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
+/**
+ * pm8001_ctl_ob_queue_log_show - Outbound queue event log
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
+       struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+       int offset;
+       char *str = buf;
+       int start = 0;
+#define OB_MEMMAP(c)           \
+               (*(u32 *)((u8 *)pm8001_ha->             \
+               memoryMap.region[OB].virt_ptr +         \
+               pm8001_ha->evtlog_ob_offset + (c)))
+
+       for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
+               /* same read for SPC and SPCv/e controllers */
+               str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
+               start = start + 4;
+       }
+       pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
+       if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
+                       && (pm8001_ha->chip_id != chip_8001))
+               pm8001_ha->evtlog_ob_offset = 0;
+       if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
+                       && (pm8001_ha->chip_id == chip_8001))
+               pm8001_ha->evtlog_ob_offset = 0;
+
+       return str - buf;
+}
+static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
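Each read of the ib_log and ob_log attributes above emits IB_OB_READ_TIMES (256) dwords, i.e. 1024 bytes, then advances a persistent cursor by SYSFS_OFFSET (also 1024) and wraps it at the per-chip queue size (32K for SPCv, 16K for SPC). A hedged sketch of just that cursor arithmetic, with a hypothetical helper name:

#include <linux/types.h>

/* Hypothetical helper: step a dump cursor by one sysfs read and wrap
 * when it reaches a multiple of the queue size. */
static u32 advance_log_cursor(u32 cursor, u32 step, u32 queue_size)
{
	cursor += step;
	if ((cursor % queue_size) == 0)
		cursor = 0;
	return cursor;
}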
+/**
+ * pm8001_ctl_bios_version_show - BIOS version display
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
+       struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+       char *str = buf;
+       void *virt_addr;
+       int bios_index;
+       DECLARE_COMPLETION_ONSTACK(completion);
+       struct pm8001_ioctl_payload payload;
+
+       pm8001_ha->nvmd_completion = &completion;
+       payload.minor_function = 7;
+       payload.offset = 0;
+       payload.length = 4096;
+       payload.func_specific = kzalloc(4096, GFP_KERNEL);
+       if (!payload.func_specific)
+               return -ENOMEM;
+       PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+       wait_for_completion(&completion);
+       virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+       for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
+               bios_index++)
+               str += sprintf(str, "%c",
+                       *((u8 *)virt_addr + bios_index));
+       kfree(payload.func_specific);
+       return str - buf;
+}
+static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
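bios_version above uses the driver's usual pattern for synchronous firmware queries: park an on-stack completion in the HBA structure, issue the asynchronous NVM-data request, and sleep until the response handler signals completion. A stripped-down sketch of that pattern; struct my_hba and issue_fw_request() are hypothetical stand-ins for pm8001_hba_info and PM8001_CHIP_DISP->get_nvmd_req():

#include <linux/completion.h>
#include <linux/errno.h>

struct my_hba {				/* hypothetical, minimal */
	struct completion *nvmd_completion;
};

int issue_fw_request(struct my_hba *hba);	/* hypothetical async request */

static int read_fw_blob(struct my_hba *hba)
{
	DECLARE_COMPLETION_ONSTACK(done);

	hba->nvmd_completion = &done;
	if (issue_fw_request(hba))
		return -EIO;
	wait_for_completion(&done);	/* response handler calls complete() */
	return 0;
}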
 /**
  * pm8001_ctl_iop_log_show - IOP event log
  * @cdev: pointer to embedded class device
@@ -344,6 +455,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
 }
 static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
 
+/**
+ * pm8001_ctl_fatal_log_show - fatal error logging
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+
+static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
+       struct device_attribute *attr, char *buf)
+{
+       u32 count;
+
+       count = pm80xx_get_fatal_dump(cdev, attr, buf);
+       return count;
+}
+
+static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
+
+
+/**
+ * pm8001_ctl_gsm_log_show - GSM dump collection
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
+       struct device_attribute *attr, char *buf)
+{
+       u32 count;
+
+       count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);
+       return count;
+}
+
+static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
+
 #define FLASH_CMD_NONE      0x00
 #define FLASH_CMD_UPDATE    0x01
 #define FLASH_CMD_SET_NVMD    0x02
@@ -603,12 +751,17 @@ struct device_attribute *pm8001_host_attrs[] = {
        &dev_attr_update_fw,
        &dev_attr_aap_log,
        &dev_attr_iop_log,
+       &dev_attr_fatal_log,
+       &dev_attr_gsm_log,
        &dev_attr_max_out_io,
        &dev_attr_max_devices,
        &dev_attr_max_sg_list,
        &dev_attr_sas_spec_support,
        &dev_attr_logging_level,
        &dev_attr_host_sas_address,
+       &dev_attr_bios_version,
+       &dev_attr_ib_log,
+       &dev_attr_ob_log,
        NULL,
 };
 
index 63ad4aa0c4222b0240068e7168af6f9ea79c3dd2..d0d43a250b9ed68b273eba9050661d98e07330da 100644 (file)
@@ -45,6 +45,8 @@
 #define HEADER_LEN                     28
 #define SIZE_OFFSET                    16
 
+#define BIOSOFFSET                     56
+#define BIOS_OFFSET_LIMIT              61
 
 #define FLASH_OK                        0x000000
 #define FAIL_OPEN_BIOS_FILE             0x000100
@@ -53,5 +55,9 @@
 #define FAIL_OUT_MEMORY                 0x000c00
 #define FLASH_IN_PROGRESS               0x001000
 
+#define IB_OB_READ_TIMES                256
+#define SYSFS_OFFSET                    1024
+#define PM80XX_IB_OB_QUEUE_SIZE         (32 * 1024)
+#define PM8001_IB_OB_QUEUE_SIZE         (16 * 1024)
 #endif /* PM8001_CTL_H_INCLUDED */
 
index 479c5a7a863a8aa8fded8e4164b8377d3658089d..74a4bb9af07b61d23a4bfda39b5b341c04f7adfb 100644 (file)
@@ -46,7 +46,10 @@ enum chip_flavors {
        chip_8008,
        chip_8009,
        chip_8018,
-       chip_8019
+       chip_8019,
+       chip_8074,
+       chip_8076,
+       chip_8077
 };
 
 enum phy_speed {
@@ -99,7 +102,8 @@ enum memory_region_num {
        NVMD,       /* NVM device */
        DEV_MEM,    /* memory for devices */
        CCB_MEM,    /* memory for command control block */
-       FW_FLASH    /* memory for fw flash update */
+       FW_FLASH,    /* memory for fw flash update */
+       FORENSIC_MEM  /* memory for fw forensic data */
 };
 #define        PM8001_EVENT_LOG_SIZE    (128 * 1024)
 
index 4a21957521988447fd0264ca1ad65bc289c2eb36..f16ece91b94ac73de979eda5496925c1d253b37b 100644 (file)
@@ -1868,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
        if (unlikely(!t || !t->lldd_task || !t->dev))
                return;
        ts = &t->task_status;
+       /* Print the SAS address of the device whose I/O failed */
+       if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+               (status != IO_UNDERFLOW))
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SAS Address of IO Failure Drive:"
+                       "%016llx", SAS_ADDR(t->dev->sas_addr)));
+
        switch (status) {
        case IO_SUCCESS:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2276,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 param;
        u32 status;
        u32 tag;
+       int i, j;
+       u8 sata_addr_low[4];
+       u32 temp_sata_addr_low;
+       u8 sata_addr_hi[4];
+       u32 temp_sata_addr_hi;
        struct sata_completion_resp *psataPayload;
        struct task_status_struct *ts;
        struct ata_task_resp *resp ;
@@ -2325,7 +2337,46 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                        pm8001_printk("ts null\n"));
                return;
        }
-
+       /* Print the SAS address of the device whose I/O failed */
+       if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+               (status != IO_UNDERFLOW)) {
+               if (!((t->dev->parent) &&
+                       (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+                       for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
+                               sata_addr_low[i] = pm8001_ha->sas_addr[j];
+                       for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
+                               sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+                       memcpy(&temp_sata_addr_low, sata_addr_low,
+                               sizeof(sata_addr_low));
+                       memcpy(&temp_sata_addr_hi, sata_addr_hi,
+                               sizeof(sata_addr_hi));
+                       temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+                                               |((temp_sata_addr_hi << 8) &
+                                               0xff0000) |
+                                               ((temp_sata_addr_hi >> 8)
+                                               & 0xff00) |
+                                               ((temp_sata_addr_hi << 24) &
+                                               0xff000000));
+                       temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+                                               & 0xff) |
+                                               ((temp_sata_addr_low << 8)
+                                               & 0xff0000) |
+                                               ((temp_sata_addr_low >> 8)
+                                               & 0xff00) |
+                                               ((temp_sata_addr_low << 24)
+                                               & 0xff000000)) +
+                                               pm8001_dev->attached_phy +
+                                               0x10);
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("SAS Address of IO Failure Drive:"
+                               "%08x%08x", temp_sata_addr_hi,
+                                       temp_sata_addr_low));
+               } else {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("SAS Address of IO Failure Drive:"
+                               "%016llx", SAS_ADDR(t->dev->sas_addr)));
+               }
+       }
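The block above prints the adapter-derived SAS address for a directly attached SATA device; the shift-and-mask sequences are simply 32-bit byte swaps, with attached_phy + 0x10 added to the low half. A sketch of the same computation expressed with swab32(); the helper and its arguments are hypothetical:

#include <linux/types.h>
#include <linux/swab.h>
#include <linux/printk.h>

/* Hypothetical helper: hi/lo are the two halves of the adapter SAS
 * address assembled in reverse byte order, as in the block above. */
static void log_failed_drive_addr(u32 hi, u32 lo, u8 attached_phy)
{
	u32 addr_hi = swab32(hi);
	u32 addr_lo = swab32(lo) + attached_phy + 0x10;

	pr_err("SAS address of failing drive: %08x%08x\n", addr_hi, addr_lo);
}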
        switch (status) {
        case IO_SUCCESS:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@@ -3087,8 +3138,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev = ccb->device;
        u32 status = le32_to_cpu(pPayload->status);
        u32 device_id = le32_to_cpu(pPayload->device_id);
-       u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS;
-       u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS;
+       u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
+       u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
        PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
                "from 0x%x to 0x%x status = 0x%x!\n",
                device_id, pds, nds, status));
@@ -4700,6 +4751,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
        memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
        sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
+       if (pm8001_ha->chip_id != chip_8001)
+               sspTMCmd.ds_ads_m = 0x08;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
        return ret;
@@ -4778,6 +4831,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
                    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
                break;
        }
+       case IOP_RDUMP: {
+               nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
+               nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
+               nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
+               nvmd_req.resp_addr_hi =
+               cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
+               nvmd_req.resp_addr_lo =
+               cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
+               break;
+       }
        default:
                break;
        }
@@ -4938,6 +5001,89 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        return rc;
 }
 
+ssize_t
+pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
+{
+       u32 value, rem, offset = 0, bar = 0;
+       u32 index, work_offset, dw_length;
+       u32 shift_value, gsm_base, gsm_dump_offset;
+       char *direct_data;
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+       direct_data = buf;
+       gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;
+
+       /* check max is 1 Mbytes */
+       if ((length > 0x100000) || (gsm_dump_offset & 3) ||
+               ((gsm_dump_offset + length) > 0x1000000))
+                       return 1;
+
+       if (pm8001_ha->chip_id == chip_8001)
+               bar = 2;
+       else
+               bar = 1;
+
+       work_offset = gsm_dump_offset & 0xFFFF0000;
+       offset = gsm_dump_offset & 0x0000FFFF;
+       gsm_dump_offset = work_offset;
+       /* adjust length to dword boundary */
+       rem = length & 3;
+       dw_length = length >> 2;
+
+       for (index = 0; index < dw_length; index++) {
+               if ((work_offset + offset) & 0xFFFF0000) {
+                       if (pm8001_ha->chip_id == chip_8001)
+                               shift_value = ((gsm_dump_offset + offset) &
+                                               SHIFT_REG_64K_MASK);
+                       else
+                               shift_value = (((gsm_dump_offset + offset) &
+                                               SHIFT_REG_64K_MASK) >>
+                                               SHIFT_REG_BIT_SHIFT);
+
+                       if (pm8001_ha->chip_id == chip_8001) {
+                               gsm_base = GSM_BASE;
+                               if (-1 == pm8001_bar4_shift(pm8001_ha,
+                                               (gsm_base + shift_value)))
+                                       return 1;
+                       } else {
+                               gsm_base = 0;
+                               if (-1 == pm80xx_bar4_shift(pm8001_ha,
+                                               (gsm_base + shift_value)))
+                                       return 1;
+                       }
+                       gsm_dump_offset = (gsm_dump_offset + offset) &
+                                               0xFFFF0000;
+                       work_offset = 0;
+                       offset = offset & 0x0000FFFF;
+               }
+               value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+                                               0x0000FFFF);
+               direct_data += sprintf(direct_data, "%08x ", value);
+               offset += 4;
+       }
+       if (rem != 0) {
+               value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
+                                               0x0000FFFF);
+               /* xfr for non_dw */
+               direct_data += sprintf(direct_data, "%08x ", value);
+       }
+       /* Shift back to BAR4 original address */
+       if (pm8001_ha->chip_id == chip_8001) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
+                       return 1;
+       } else {
+               if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
+                       return 1;
+       }
+       pm8001_ha->fatal_forensic_shift_offset += 1024;
+
+       if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
+               pm8001_ha->fatal_forensic_shift_offset = 0;
+       return direct_data - buf;
+}
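pm8001_get_gsm_dump() works around the fact that only a 64K window of GSM space is visible through the BAR at a time: the running offset is split into a 64K-aligned window base, programmed through the BAR4 shift register, and an offset inside the window, and each dword is read relative to the current window. A hedged sketch of that windowing scheme; set_window() and read_dword() are hypothetical stand-ins for the bar4-shift helpers and pm8001_cr32():

#include <linux/types.h>

int set_window(u32 window_base);	/* hypothetical: program the shift register */
u32 read_dword(u32 offset_in_window);	/* hypothetical: 32-bit BAR read */

static int dump_dwords(u32 start, u32 count, u32 *out)
{
	u32 i, addr, window = ~0U;

	for (i = 0; i < count; i++) {
		addr = start + i * 4;
		if ((addr & 0xFFFF0000) != window) {	/* crossed a 64K window */
			window = addr & 0xFFFF0000;
			if (set_window(window))
				return -1;
		}
		out[i] = read_dword(addr & 0x0000FFFF);
	}
	return 0;
}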
+
 int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 state)
index d7c1e2034226ea7667201a2a1474400f52994b5d..6d91e2446542c78290dfba8f9e846380ccda3067 100644 (file)
@@ -1027,5 +1027,8 @@ struct set_dev_state_resp {
 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE            0x06
 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID           0x07
 
+#define GSM_BASE                                       0x4F0000
+#define SHIFT_REG_64K_MASK                             0xffff0000
+#define SHIFT_REG_BIT_SHIFT                            8
 #endif
 
index 0dba7c7856ab2bcfd013f6d44afe0c8ea0e5505e..34f5f5ffef056ec2e86e44726dd99c13ae204877 100644 (file)
@@ -54,6 +54,9 @@ static const struct pm8001_chip_info pm8001_chips[] = {
        [chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
        [chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
        [chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
+       [chip_8074] = {0,  8, &pm8001_80xx_dispatch,},
+       [chip_8076] = {0,  16, &pm8001_80xx_dispatch,},
+       [chip_8077] = {0,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
 
@@ -344,6 +347,10 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        /* Memory region for fw flash */
        pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
 
+       pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
+       pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
+       pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
+       pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_mem_alloc(pm8001_ha->pdev,
                        &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -664,6 +671,31 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 #endif
 }
 
+/*
+ * pm8001_get_phy_settings_info - read PHY setting values
+ * @pm8001_ha: our hba
+ */
+void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+{
+
+#ifdef PM8001_READ_VPD
+       /* OPTION ROM FLASH read for the SPC cards */
+       DECLARE_COMPLETION_ONSTACK(completion);
+       struct pm8001_ioctl_payload payload;
+
+       pm8001_ha->nvmd_completion = &completion;
+       /* PHY settings are read from flash / EEPROM */
+       payload.minor_function = 6;
+       payload.offset = 0;
+       payload.length = 4096;
+       payload.func_specific = kzalloc(4096, GFP_KERNEL);
+       if (!payload.func_specific)
+               return;
+       /* Read phy setting values from flash */
+       PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+       wait_for_completion(&completion);
+       pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+       kfree(payload.func_specific);
+#endif
+}
+
 #ifdef PM8001_USE_MSIX
 /**
  * pm8001_setup_msix - enable MSI-X interrupt
@@ -844,6 +876,10 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
        }
 
        pm8001_init_sas_add(pm8001_ha);
+       /* phy setting support for motherboard controller */
+       if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
+               pdev->subsystem_vendor != 0)
+               pm8001_get_phy_settings_info(pm8001_ha);
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
        if (rc)
@@ -1036,6 +1072,12 @@ static struct pci_device_id pm8001_pci_table[] = {
        { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
        { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
        { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
        { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
                PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
        { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
@@ -1056,6 +1098,24 @@ static struct pci_device_id pm8001_pci_table[] = {
                PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
        { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
                PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8076,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8077,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8074,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
        {} /* terminate list */
 };
 
@@ -1107,8 +1167,11 @@ module_init(pm8001_init);
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
+MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
+MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
 MODULE_DESCRIPTION(
-               "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
+               "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+               "SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
index a85d73de7c80c730260362ae56a7688189459ee6..f4eb18e5163152b414fc8b0a08b4a07a7f844111 100644 (file)
@@ -447,7 +447,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
                        break;
                case SAS_PROTOCOL_SATA:
                case SAS_PROTOCOL_STP:
-               case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
                        rc = pm8001_task_prep_ata(pm8001_ha, ccb);
                        break;
                default:
@@ -704,6 +703,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
        int res, retry;
        struct sas_task *task = NULL;
        struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       DECLARE_COMPLETION_ONSTACK(completion_setstate);
 
        for (retry = 0; retry < 3; retry++) {
                task = sas_alloc_slow_task(GFP_KERNEL);
@@ -729,6 +730,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
                        goto ex_err;
                }
                wait_for_completion(&task->slow_task->completion);
+               if (pm8001_ha->chip_id != chip_8001) {
+                       pm8001_dev->setds_completion = &completion_setstate;
+                               PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+                                       pm8001_dev, 0x01);
+                       wait_for_completion(&completion_setstate);
+               }
                res = -TMF_RESP_FUNC_FAILED;
                /* Even TMF timed out, return direct. */
                if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
index 570819464d90b40568477eac1e4c283ae10c3169..6037d477a183241c0010d73a00580c3667f3aaa9 100644 (file)
@@ -104,6 +104,9 @@ do {                                                \
 
 
 #define DEV_IS_EXPANDER(type)  ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
+#define IS_SPCV_12G(dev)       (((dev)->device == 0x8074)              \
+                               || ((dev)->device == 0x8076)            \
+                               || ((dev)->device == 0x8077))
 
 #define PM8001_NAME_LENGTH             32/* generic length of strings */
 extern struct list_head hba_list;
@@ -129,6 +132,61 @@ struct pm8001_ioctl_payload {
        u8      *func_specific;
 };
 
+#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
+#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24)
+#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET            0x00     /* HNFBUFL */
+#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET            0x04     /* HNFBUFH */
+#define MPI_FATAL_EDUMP_TABLE_LENGTH               0x08     /* HNFBLEN */
+#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE            0x0C     /* FDDHSHK */
+#define MPI_FATAL_EDUMP_TABLE_STATUS               0x10     /* FDDTSTAT */
+#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN            0x14     /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_HANDSHAKE_RDY              0x1
+#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY             0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD                 0x0
+#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED           0x1
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
+#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE      0x3
+#define TYPE_GSM_SPACE        1
+#define TYPE_QUEUE            2
+#define TYPE_FATAL            3
+#define TYPE_NON_FATAL        4
+#define TYPE_INBOUND          1
+#define TYPE_OUTBOUND         2
+struct forensic_data {
+       u32  data_type;
+       union {
+               struct {
+                       u32  direct_len;
+                       u32  direct_offset;
+                       void  *direct_data;
+               } gsm_buf;
+               struct {
+                       u16  queue_type;
+                       u16  queue_index;
+                       u32  direct_len;
+                       void  *direct_data;
+               } queue_buf;
+               struct {
+                       u32  direct_len;
+                       u32  direct_offset;
+                       u32  read_len;
+                       void  *direct_data;
+               } data_buf;
+       };
+};
+
+/* bit31-26 - mask bar */
+#define SCRATCH_PAD0_BAR_MASK                    0xFC000000
+/* bit25-0  - offset mask */
+#define SCRATCH_PAD0_OFFSET_MASK                 0x03FFFFFF
+/* if AAP error state */
+#define SCRATCH_PAD0_AAPERR_MASK                 0xFFFFFFFF
+/* Inbound doorbell bit7 */
+#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP       0x80
+/* Inbound doorbell bit7 SPCV */
+#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO  0x80
+#define MAIN_MERRDCTO_MERRDCES                  0xA0 /* DWORD 0x28 */
+
 struct pm8001_dispatch {
        char *name;
        int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -343,6 +401,7 @@ union main_cfg_table {
        u32                     phy_attr_table_offset;
        u32                     port_recovery_timer;
        u32                     interrupt_reassertion_delay;
+       u32                     fatal_n_non_fatal_dump;         /* 0x28 */
        } pm80xx_tbl;
 };
 
@@ -417,6 +476,13 @@ struct pm8001_hba_info {
        struct pm8001_hba_memspace io_mem[6];
        struct mpi_mem_req      memoryMap;
        struct encrypt          encrypt_info; /* support encryption */
+       struct forensic_data    forensic_info;
+       u32                     fatal_bar_loc;
+       u32                     forensic_last_offset;
+       u32                     fatal_forensic_shift_offset;
+       u32                     forensic_fatal_step;
+       u32                     evtlog_ib_offset;
+       u32                     evtlog_ob_offset;
        void __iomem    *msg_unit_tbl_addr;/*Message Unit Table Addr*/
        void __iomem    *main_cfg_tbl_addr;/*Main Config Table Addr*/
        void __iomem    *general_stat_tbl_addr;/*General Status Table Addr*/
@@ -425,6 +491,7 @@ struct pm8001_hba_info {
        void __iomem    *pspa_q_tbl_addr;
                        /*MPI SAS PHY attributes Queue Config Table Addr*/
        void __iomem    *ivt_tbl_addr; /*MPI IVT Table Addr */
+       void __iomem    *fatal_tbl_addr; /* MPI Fatal Error Table Addr */
        union main_cfg_table    main_cfg_tbl;
        union general_status_table      gs_tbl;
        struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
@@ -629,7 +696,12 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
 int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
 
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
-
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+       u32 length, u8 *buf);
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+               struct device_attribute *attr, char *buf);
+ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
 /* ctl shared API */
 extern struct device_attribute *pm8001_host_attrs[];
 
index 9f91030211e84e4edb8479e84ef26c8dabe509a0..8987b1706216436ef36392eb329e56320547ab23 100644 (file)
 
 #define SMP_DIRECT 1
 #define SMP_INDIRECT 2
+
+
+int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
+{
+       u32 reg_val;
+       unsigned long start;
+       pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);
+       /* confirm the setting is written */
+       start = jiffies + HZ; /* 1 sec */
+       do {
+               reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER);
+       } while ((reg_val != shift_value) && time_before(jiffies, start));
+       if (reg_val != shift_value) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+                       " = 0x%x\n", reg_val));
+               return -1;
+       }
+       return 0;
+}
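pm80xx_bar4_shift() above writes the MEMBASE II shift register and then polls it back for up to a second, since the new window is only usable once the hardware has latched the value. The same write-and-confirm idiom in isolation, with hypothetical register accessors passed as function pointers:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Hypothetical accessors wr()/rd() stand in for pm8001_cw32()/pm8001_cr32(). */
static int write_and_confirm(void (*wr)(u32), u32 (*rd)(void), u32 val)
{
	unsigned long deadline = jiffies + HZ;	/* one second budget */

	wr(val);
	while (rd() != val && time_before(jiffies, deadline))
		udelay(1);
	return rd() == val ? 0 : -ETIMEDOUT;
}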
+
+void pm80xx_pci_mem_copy(struct pm8001_hba_info  *pm8001_ha, u32 soffset,
+                               const void *destination,
+                               u32 dw_count, u32 bus_base_number)
+{
+       u32 index, value, offset;
+       u32 *destination1;
+       destination1 = (u32 *)destination;
+
+       for (index = 0; index < dw_count; index += 4, destination1++) {
+               offset = (soffset + index / 4);
+               if (offset < (64 * 1024)) {
+                       value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
+                       *destination1 =  cpu_to_le32(value);
+               }
+       }
+       return;
+}
+
+ssize_t pm80xx_get_fatal_dump(struct device *cdev,
+       struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+       void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
+       u32 status = 1;
+       u32 accum_len, reg_val, index, *temp;
+       unsigned long start;
+       u8 *direct_data;
+       char *fatal_error_data = buf;
+
+       pm8001_ha->forensic_info.data_buf.direct_data = buf;
+       if (pm8001_ha->chip_id == chip_8001) {
+               pm8001_ha->forensic_info.data_buf.direct_data +=
+                       sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+                       "Not supported for SPC controller");
+               return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+                       (char *)buf;
+       }
+       if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
+               direct_data = (u8 *)fatal_error_data;
+               pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
+               pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+               pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+               pm8001_ha->forensic_info.data_buf.read_len = 0;
+
+               pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+       }
+
+       if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
+               /* start to get data */
+               /* Program the MEMBASE II Shifting Register with 0x00.*/
+               pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+                               pm8001_ha->fatal_forensic_shift_offset);
+               pm8001_ha->forensic_last_offset = 0;
+               pm8001_ha->forensic_fatal_step = 0;
+               pm8001_ha->fatal_bar_loc = 0;
+       }
+       /* Read until accum_len is retrieved */
+       accum_len = pm8001_mr32(fatal_table_address,
+                               MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+       PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
+                                               accum_len));
+       if (accum_len == 0xFFFFFFFF) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Possible PCI issue 0x%x not expected\n",
+                               accum_len));
+               return status;
+       }
+       if (accum_len == 0 || accum_len >= 0x100000) {
+               pm8001_ha->forensic_info.data_buf.direct_data +=
+                       sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+                               "%08x ", 0xFFFFFFFF);
+               return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+                       (char *)buf;
+       }
+       temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
+       if (pm8001_ha->forensic_fatal_step == 0) {
+moreData:
+               if (pm8001_ha->forensic_info.data_buf.direct_data) {
+                       /* Data is in bar, copy to host memory */
+                       pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
+                        pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+                               pm8001_ha->forensic_info.data_buf.direct_len ,
+                                       1);
+               }
+               pm8001_ha->fatal_bar_loc +=
+                       pm8001_ha->forensic_info.data_buf.direct_len;
+               pm8001_ha->forensic_info.data_buf.direct_offset +=
+                       pm8001_ha->forensic_info.data_buf.direct_len;
+               pm8001_ha->forensic_last_offset +=
+                       pm8001_ha->forensic_info.data_buf.direct_len;
+               pm8001_ha->forensic_info.data_buf.read_len =
+                       pm8001_ha->forensic_info.data_buf.direct_len;
+
+               if (pm8001_ha->forensic_last_offset  >= accum_len) {
+                       pm8001_ha->forensic_info.data_buf.direct_data +=
+                       sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+                               "%08x ", 3);
+                       for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+                               pm8001_ha->forensic_info.data_buf.direct_data +=
+                                       sprintf(pm8001_ha->
+                                        forensic_info.data_buf.direct_data,
+                                               "%08x ", *(temp + index));
+                       }
+
+                       pm8001_ha->fatal_bar_loc = 0;
+                       pm8001_ha->forensic_fatal_step = 1;
+                       pm8001_ha->fatal_forensic_shift_offset = 0;
+                       pm8001_ha->forensic_last_offset = 0;
+                       status = 0;
+                       return (char *)pm8001_ha->
+                               forensic_info.data_buf.direct_data -
+                               (char *)buf;
+               }
+               if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
+                       pm8001_ha->forensic_info.data_buf.direct_data +=
+                               sprintf(pm8001_ha->
+                                       forensic_info.data_buf.direct_data,
+                                       "%08x ", 2);
+                       for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+                               pm8001_ha->forensic_info.data_buf.direct_data +=
+                                       sprintf(pm8001_ha->
+                                       forensic_info.data_buf.direct_data,
+                                       "%08x ", *(temp + index));
+                       }
+                       status = 0;
+                       return (char *)pm8001_ha->
+                               forensic_info.data_buf.direct_data -
+                               (char *)buf;
+               }
+
+               /* Increment the MEMBASE II Shifting Register value by 0x100.*/
+               pm8001_ha->forensic_info.data_buf.direct_data +=
+                       sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
+                               "%08x ", 2);
+               for (index = 0; index < 256; index++) {
+                       pm8001_ha->forensic_info.data_buf.direct_data +=
+                               sprintf(pm8001_ha->
+                                       forensic_info.data_buf.direct_data,
+                                               "%08x ", *(temp + index));
+               }
+               pm8001_ha->fatal_forensic_shift_offset += 0x100;
+               pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+                       pm8001_ha->fatal_forensic_shift_offset);
+               pm8001_ha->fatal_bar_loc = 0;
+               status = 0;
+               return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+                       (char *)buf;
+       }
+       if (pm8001_ha->forensic_fatal_step == 1) {
+               pm8001_ha->fatal_forensic_shift_offset = 0;
+               /* Read 64K of the debug data. */
+               pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
+                       pm8001_ha->fatal_forensic_shift_offset);
+               pm8001_mw32(fatal_table_address,
+                       MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+                               MPI_FATAL_EDUMP_HANDSHAKE_RDY);
+
+               /* Poll FDDHSHK  until clear  */
+               start = jiffies + (2 * HZ); /* 2 sec */
+
+               do {
+                       reg_val = pm8001_mr32(fatal_table_address,
+                                       MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
+               } while ((reg_val) && time_before(jiffies, start));
+
+               if (reg_val != 0) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
+                       " = 0x%x\n", reg_val));
+                       return -1;
+               }
+
+               /* Read the next 64K of the debug data. */
+               pm8001_ha->forensic_fatal_step = 0;
+               if (pm8001_mr32(fatal_table_address,
+                       MPI_FATAL_EDUMP_TABLE_STATUS) !=
+                               MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+                       pm8001_mw32(fatal_table_address,
+                               MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
+                       goto moreData;
+               } else {
+                       pm8001_ha->forensic_info.data_buf.direct_data +=
+                               sprintf(pm8001_ha->
+                                       forensic_info.data_buf.direct_data,
+                                               "%08x ", 4);
+                       pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
+                       pm8001_ha->forensic_info.data_buf.direct_len =  0;
+                       pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+                       pm8001_ha->forensic_info.data_buf.read_len = 0;
+                       status = 0;
+               }
+       }
+
+       return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
+               (char *)buf;
+}
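pm80xx_get_fatal_dump() above drains the dump in 64K steps and then runs a host/firmware handshake: write MPI_FATAL_EDUMP_HANDSHAKE_RDY into the table, poll for up to 2 seconds until the firmware clears it, and keep going while the status field still reports that more data follows. A compressed sketch of that handshake loop; read_tbl()/write_tbl() are hypothetical accessors and the offsets mirror the MPI_FATAL_EDUMP_* values added in pm8001_sas.h above:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

enum { HANDSHAKE = 0x0C, STATUS = 0x10, DONE = 0x3 };	/* table offsets / value */

u32 read_tbl(u32 off);			/* hypothetical table accessors */
void write_tbl(u32 off, u32 val);

static int finish_fatal_dump(void)
{
	unsigned long deadline;

	do {
		write_tbl(HANDSHAKE, 1);		/* host buffer ready */
		deadline = jiffies + 2 * HZ;
		while (read_tbl(HANDSHAKE) && time_before(jiffies, deadline))
			udelay(1);
		if (read_tbl(HANDSHAKE))
			return -ETIMEDOUT;
	} while (read_tbl(STATUS) != DONE);		/* more 64K chunks pending */
	return 0;
}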
+
 /**
  * read_main_config_table - read the configure table and save it.
  * @pm8001_ha: our hba card information
@@ -430,7 +652,11 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
        table is updated */
        pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
        /* wait until Inbound DoorBell Clear Register toggled */
-       max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+       if (IS_SPCV_12G(pm8001_ha->pdev)) {
+               max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+       } else {
+               max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+       }
        do {
                udelay(1);
                value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -579,6 +805,9 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->pspa_q_tbl_addr =
                base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
                                        0xFFFFFF);
+       pm8001_ha->fatal_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
+                                       0xFFFFFF);
 
        PM8001_INIT_DBG(pm8001_ha,
                        pm8001_printk("GST OFFSET 0x%x\n",
@@ -913,7 +1142,11 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
        pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
 
        /* wait until Inbound DoorBell Clear Register toggled */
-       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       if (IS_SPCV_12G(pm8001_ha->pdev)) {
+               max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+       } else {
+               max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+       }
        do {
                udelay(1);
                value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@@ -959,6 +1192,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 {
        u32 regval;
        u32 bootloader_state;
+       u32 ibutton0, ibutton1;
 
        /* Check if MPI is in ready state to reset */
        if (mpi_uninit_check(pm8001_ha) != 0) {
@@ -1017,7 +1251,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
        if (-1 == check_fw_ready(pm8001_ha)) {
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("Firmware is not ready!\n"));
-               return -EBUSY;
+               /* check iButton feature support for motherboard controller */
+               if (pm8001_ha->pdev->subsystem_vendor !=
+                       PCI_VENDOR_ID_ADAPTEC2 &&
+                       pm8001_ha->pdev->subsystem_vendor != 0) {
+                       ibutton0 = pm8001_cr32(pm8001_ha, 0,
+                                       MSGU_HOST_SCRATCH_PAD_6);
+                       ibutton1 = pm8001_cr32(pm8001_ha, 0,
+                                       MSGU_HOST_SCRATCH_PAD_7);
+                       if (!ibutton0 && !ibutton1) {
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("iButton Feature is"
+                                       " not Available!!!\n"));
+                               return -EBUSY;
+                       }
+                       if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("CRC Check for iButton"
+                                       " Feature Failed!!!\n"));
+                               return -EBUSY;
+                       }
+               }
        }
        PM8001_INIT_DBG(pm8001_ha,
                pm8001_printk("SPCv soft reset Complete\n"));
@@ -1268,6 +1522,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
        if (unlikely(!t || !t->lldd_task || !t->dev))
                return;
        ts = &t->task_status;
+       /* Print the SAS address of the device whose I/O failed */
+       if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+               (status != IO_UNDERFLOW))
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SAS Address of IO Failure Drive"
+                       ":%016llx", SAS_ADDR(t->dev->sas_addr)));
+
        switch (status) {
        case IO_SUCCESS:
                PM8001_IO_DBG(pm8001_ha,
@@ -1691,6 +1952,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 param;
        u32 status;
        u32 tag;
+       int i, j;
+       u8 sata_addr_low[4];
+       u32 temp_sata_addr_low, temp_sata_addr_hi;
+       u8 sata_addr_hi[4];
        struct sata_completion_resp *psataPayload;
        struct task_status_struct *ts;
        struct ata_task_resp *resp ;
@@ -1740,7 +2005,47 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                        pm8001_printk("ts null\n"));
                return;
        }
+       /* Print the SAS address of the device whose I/O failed */
+       if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
+               (status != IO_UNDERFLOW)) {
+               if (!((t->dev->parent) &&
+                       (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
+                       for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
+                               sata_addr_low[i] = pm8001_ha->sas_addr[j];
+                       for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
+                               sata_addr_hi[i] = pm8001_ha->sas_addr[j];
+                       memcpy(&temp_sata_addr_low, sata_addr_low,
+                               sizeof(sata_addr_low));
+                       memcpy(&temp_sata_addr_hi, sata_addr_hi,
+                               sizeof(sata_addr_hi));
+                       temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
+                                               |((temp_sata_addr_hi << 8) &
+                                               0xff0000) |
+                                               ((temp_sata_addr_hi >> 8)
+                                               & 0xff00) |
+                                               ((temp_sata_addr_hi << 24) &
+                                               0xff000000));
+                       temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
+                                               & 0xff) |
+                                               ((temp_sata_addr_low << 8)
+                                               & 0xff0000) |
+                                               ((temp_sata_addr_low >> 8)
+                                               & 0xff00) |
+                                               ((temp_sata_addr_low << 24)
+                                               & 0xff000000)) +
+                                               pm8001_dev->attached_phy +
+                                               0x10);
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("SAS Address of IO Failure Drive:"
+                               "%08x%08x", temp_sata_addr_hi,
+                                       temp_sata_addr_low));
 
+               } else {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("SAS Address of IO Failure Drive:"
+                               "%016llx", SAS_ADDR(t->dev->sas_addr)));
+               }
+       }
        switch (status) {
        case IO_SUCCESS:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@@ -3103,9 +3408,27 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
                        void *piomb)
 {
-       PM8001_MSG_DBG(pm8001_ha,
-                       pm8001_printk(" pm80xx_addition_functionality\n"));
+       u8 page_code;
+       struct set_phy_profile_resp *pPayload =
+               (struct set_phy_profile_resp *)(piomb + 4);
+       u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
+       u32 status = le32_to_cpu(pPayload->status);
 
+       page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
+       if (status) {
+               /* status is FAILED */
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("PhyProfile command failed with status "
+                       "0x%08X\n", status));
+               return -1;
+       } else {
+               if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Invalid page code 0x%X\n",
+                                       page_code));
+                       return -1;
+               }
+       }
        return 0;
 }
 
@@ -3484,8 +3807,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        else
                pm8001_ha->smp_exp_mode = SMP_INDIRECT;
 
-       /* DIRECT MODE support only in spcv/ve */
-       pm8001_ha->smp_exp_mode = SMP_DIRECT;
 
        tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
        preq_dma_addr = (char *)phys_to_virt(tmp_addr);
@@ -3501,7 +3822,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                /* exclude top 4 bytes for SMP req header */
                smp_cmd.long_smp_req.long_req_addr =
                        cpu_to_le64((u64)sg_dma_address
-                               (&task->smp_task.smp_req) - 4);
+                               (&task->smp_task.smp_req) + 4);
                /* exclude 4 bytes for SMP req header and CRC */
                smp_cmd.long_smp_req.long_req_size =
                        cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
@@ -3604,10 +3925,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
        struct ssp_ini_io_start_req ssp_cmd;
        u32 tag = ccb->ccb_tag;
        int ret;
-       u64 phys_addr;
+       u64 phys_addr, start_addr, end_addr;
+       u32 end_addr_high, end_addr_low;
        struct inbound_queue_table *circularQ;
-       static u32 inb;
-       static u32 outb;
+       u32 q_index;
        u32 opc = OPC_INB_SSPINIIOSTART;
        memset(&ssp_cmd, 0, sizeof(ssp_cmd));
        memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -3626,7 +3947,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
        ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
        memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
                       task->ssp_task.cmd->cmd_len);
-       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+       circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
        /* Check if encryption is set */
        if (pm8001_ha->chip->encrypt &&
@@ -3658,6 +3980,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                                cpu_to_le32(upper_32_bits(dma_addr));
                        ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
                        ssp_cmd.enc_esgl = 0;
+                       /* Check 4G Boundary */
+                       start_addr = cpu_to_le64(dma_addr);
+                       end_addr = (start_addr + ssp_cmd.enc_len) - 1;
+                       end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+                       end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+                       if (end_addr_high != ssp_cmd.enc_addr_high) {
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("The sg list address "
+                                       "start_addr=0x%016llx data_len=0x%x "
+                                       "end_addr_high=0x%08x end_addr_low="
+                                       "0x%08x has crossed 4G boundary\n",
+                                               start_addr, ssp_cmd.enc_len,
+                                               end_addr_high, end_addr_low));
+                               pm8001_chip_make_sg(task->scatter, 1,
+                                       ccb->buf_prd);
+                               phys_addr = ccb->ccb_dma_handle +
+                                       offsetof(struct pm8001_ccb_info,
+                                               buf_prd[0]);
+                               ssp_cmd.enc_addr_low =
+                                       cpu_to_le32(lower_32_bits(phys_addr));
+                               ssp_cmd.enc_addr_high =
+                                       cpu_to_le32(upper_32_bits(phys_addr));
+                               ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+                       }
                } else if (task->num_scatter == 0) {
                        ssp_cmd.enc_addr_low = 0;
                        ssp_cmd.enc_addr_high = 0;
@@ -3674,7 +4020,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
        } else {
                PM8001_IO_DBG(pm8001_ha, pm8001_printk(
                        "Sending Normal SAS command 0x%x inb q %x\n",
-                       task->ssp_task.cmd->cmnd[0], inb));
+                       task->ssp_task.cmd->cmnd[0], q_index));
                /* fill in PRD (scatter/gather) table, if any */
                if (task->num_scatter > 1) {
                        pm8001_chip_make_sg(task->scatter, ccb->n_elem,
@@ -3693,6 +4039,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                                cpu_to_le32(upper_32_bits(dma_addr));
                        ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
                        ssp_cmd.esgl = 0;
+                       /* Check 4G Boundary */
+                       start_addr = cpu_to_le64(dma_addr);
+                       end_addr = (start_addr + ssp_cmd.len) - 1;
+                       end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+                       end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+                       if (end_addr_high != ssp_cmd.addr_high) {
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("The sg list address "
+                                       "start_addr=0x%016llx data_len=0x%x "
+                                       "end_addr_high=0x%08x end_addr_low="
+                                       "0x%08x has crossed 4G boundary\n",
+                                                start_addr, ssp_cmd.len,
+                                                end_addr_high, end_addr_low));
+                               pm8001_chip_make_sg(task->scatter, 1,
+                                       ccb->buf_prd);
+                               phys_addr = ccb->ccb_dma_handle +
+                                       offsetof(struct pm8001_ccb_info,
+                                                buf_prd[0]);
+                               ssp_cmd.addr_low =
+                                       cpu_to_le32(lower_32_bits(phys_addr));
+                               ssp_cmd.addr_high =
+                                       cpu_to_le32(upper_32_bits(phys_addr));
+                               ssp_cmd.esgl = cpu_to_le32(1<<31);
+                       }
                } else if (task->num_scatter == 0) {
                        ssp_cmd.addr_low = 0;
                        ssp_cmd.addr_high = 0;
@@ -3700,11 +4070,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                        ssp_cmd.esgl = 0;
                }
        }
-       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
-
-       /* rotate the outb queue */
-       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
-
+       q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+                                               &ssp_cmd, q_index);
        return ret;
 }
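With this change the SSP start-I/O path no longer relies on function-local static inb/outb counters (one shared pair for every device and every caller); both the inbound and outbound queue indices are instead derived from the device id, so each device consistently maps onto the same queue pair. The selection, as an illustrative helper only:

/* Illustrative only: queue selection used by the SSP and SATA start-I/O
 * paths in this patch (PM8001_MAX_INB_NUM / PM8001_MAX_OUTB_NUM are the
 * existing queue-count limits).
 */
static inline u32 pm80xx_dev_to_queue(u32 dev_id, u32 nr_queues)
{
	return (dev_id & 0x00ffffff) % nr_queues;
}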
 
@@ -3716,18 +4084,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
        u32 tag = ccb->ccb_tag;
        int ret;
-       static u32 inb;
-       static u32 outb;
+       u32 q_index;
        struct sata_start_req sata_cmd;
        u32 hdr_tag, ncg_tag = 0;
-       u64 phys_addr;
+       u64 phys_addr, start_addr, end_addr;
+       u32 end_addr_high, end_addr_low;
        u32 ATAP = 0x0;
        u32 dir;
        struct inbound_queue_table *circularQ;
        unsigned long flags;
        u32 opc = OPC_INB_SATA_HOST_OPSTART;
        memset(&sata_cmd, 0, sizeof(sata_cmd));
-       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
+       circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
        if (task->data_dir == PCI_DMA_NONE) {
                ATAP = 0x04; /* no data*/
@@ -3788,6 +4157,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
                        sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
                        sata_cmd.enc_esgl = 0;
+                       /* Check 4G Boundary */
+                       start_addr = cpu_to_le64(dma_addr);
+                       end_addr = (start_addr + sata_cmd.enc_len) - 1;
+                       end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+                       end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+                       if (end_addr_high != sata_cmd.enc_addr_high) {
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("The sg list address "
+                                       "start_addr=0x%016llx data_len=0x%x "
+                                       "end_addr_high=0x%08x end_addr_low"
+                                       "=0x%08x has crossed 4G boundary\n",
+                                               start_addr, sata_cmd.enc_len,
+                                               end_addr_high, end_addr_low));
+                               pm8001_chip_make_sg(task->scatter, 1,
+                                       ccb->buf_prd);
+                               phys_addr = ccb->ccb_dma_handle +
+                                               offsetof(struct pm8001_ccb_info,
+                                               buf_prd[0]);
+                               sata_cmd.enc_addr_low =
+                                       lower_32_bits(phys_addr);
+                               sata_cmd.enc_addr_high =
+                                       upper_32_bits(phys_addr);
+                               sata_cmd.enc_esgl =
+                                       cpu_to_le32(1 << 31);
+                       }
                } else if (task->num_scatter == 0) {
                        sata_cmd.enc_addr_low = 0;
                        sata_cmd.enc_addr_high = 0;
@@ -3808,7 +4202,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        } else {
                PM8001_IO_DBG(pm8001_ha, pm8001_printk(
                        "Sending Normal SATA command 0x%x inb %x\n",
-                       sata_cmd.sata_fis.command, inb));
+                       sata_cmd.sata_fis.command, q_index));
                /* dad (bit 0-1) is 0 */
                sata_cmd.ncqtag_atap_dir_m_dad =
                        cpu_to_le32(((ncg_tag & 0xff)<<16) |
@@ -3829,6 +4223,30 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        sata_cmd.addr_high = upper_32_bits(dma_addr);
                        sata_cmd.len = cpu_to_le32(task->total_xfer_len);
                        sata_cmd.esgl = 0;
+                       /* Check 4G Boundary */
+                       start_addr = cpu_to_le64(dma_addr);
+                       end_addr = (start_addr + sata_cmd.len) - 1;
+                       end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
+                       end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+                       if (end_addr_high != sata_cmd.addr_high) {
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("The sg list address "
+                                       "start_addr=0x%016llx data_len=0x%x"
+                                       "end_addr_high=0x%08x end_addr_low="
+                                       "0x%08x has crossed 4G boundary\n",
+                                               start_addr, sata_cmd.len,
+                                               end_addr_high, end_addr_low));
+                               pm8001_chip_make_sg(task->scatter, 1,
+                                       ccb->buf_prd);
+                               phys_addr = ccb->ccb_dma_handle +
+                                       offsetof(struct pm8001_ccb_info,
+                                       buf_prd[0]);
+                               sata_cmd.addr_low =
+                                       lower_32_bits(phys_addr);
+                               sata_cmd.addr_high =
+                                       upper_32_bits(phys_addr);
+                               sata_cmd.esgl = cpu_to_le32(1 << 31);
+                       }
                } else if (task->num_scatter == 0) {
                        sata_cmd.addr_low = 0;
                        sata_cmd.addr_high = 0;
@@ -3905,12 +4323,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        }
                }
        }
-
+       q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
        ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
-                                               &sata_cmd, outb++);
-
-       /* rotate the outb queue */
-       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+                                               &sata_cmd, q_index);
        return ret;
 }
 
@@ -3941,9 +4356,16 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
         ** [14]        0b disable spin up hold; 1b enable spin up hold
         ** [15] 0b no change in current PHY analog setup; 1b enable using SPAST
         */
-       payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
-                       LINKMODE_AUTO | LINKRATE_15 |
-                       LINKRATE_30 | LINKRATE_60 | phy_id);
+       if (!IS_SPCV_12G(pm8001_ha->pdev))
+               payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+                               LINKMODE_AUTO | LINKRATE_15 |
+                               LINKRATE_30 | LINKRATE_60 | phy_id);
+       else
+               payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+                               LINKMODE_AUTO | LINKRATE_15 |
+                               LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
+                               phy_id);
+
        /* SSC Disable and SAS Analog ST configuration */
        /**
        payload.ase_sh_lm_slr_phyid =
@@ -4102,6 +4524,45 @@ pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
        return IRQ_HANDLED;
 }
 
+void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
+       u32 operation, u32 phyid, u32 length, u32 *buf)
+{
+       u32 tag , i, j = 0;
+       int rc;
+       struct set_phy_profile_req payload;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SET_PHY_PROFILE;
+
+       memset(&payload, 0, sizeof(payload));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid  & 0xFF));
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk(" phy profile command for phy %x, length is %d\n",
+                       payload.ppc_phyid, length));
+       for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
+               payload.reserved[j] =  cpu_to_le32(*((u32 *)buf + i));
+               j++;
+       }
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
+       u32 length, u8 *buf)
+{
+       u32 page_code, i;
+
+       page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
+       for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+               mpi_set_phy_profile_req(pm8001_ha,
+                       SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
+               length = length + PHY_DWORD_LENGTH;
+       }
+       PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
+}
 const struct pm8001_dispatch pm8001_80xx_dispatch = {
        .name                   = "pmc80xx",
        .chip_init              = pm80xx_chip_init,
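mpi_set_phy_profile_req() and the reworked mpi_set_phy_profile_resp() above share one packed word: the page code (the "operation", here always SAS_PHY_ANALOG_SETTINGS_PAGE) sits in bits 8-11 of ppc_phyid and the phy id in bits 0-7, which is why the completion handler recovers the page code as (ppc_phyid & 0xFF00) >> 8. Packing and unpacking, shown as hypothetical helpers for illustration:

/* Illustrative only: layout of the ppc_phyid word used by the
 * SET_PHY_PROFILE request/response pair.
 */
static inline u32 ppc_phyid_pack(u32 page_code, u32 phy_id)
{
	return ((page_code & 0xF) << 8) | (phy_id & 0xFF);
}

static inline u8 ppc_phyid_page_code(u32 ppc_phyid)
{
	return (ppc_phyid & 0xFF00) >> 8;
}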
index 2b760ba75d7bcca5ae008a30621b925414922f44..c86816bea4243354d8864a5422778996ebec215e 100644 (file)
 #define LINKRATE_15                    (0x01 << 8)
 #define LINKRATE_30                    (0x02 << 8)
 #define LINKRATE_60                    (0x06 << 8)
+#define LINKRATE_120                   (0x08 << 8)
+
+/* phy_profile */
+#define SAS_PHY_ANALOG_SETTINGS_PAGE   0x04
+#define PHY_DWORD_LENGTH               0xC
 
 /* Thermal related */
 #define        THERMAL_ENABLE                  0x1
@@ -1223,10 +1228,10 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 
 /* MSGU CONFIGURATION TABLE*/
 
-#define SPCv_MSGU_CFG_TABLE_UPDATE             0x01
-#define SPCv_MSGU_CFG_TABLE_RESET              0x02
-#define SPCv_MSGU_CFG_TABLE_FREEZE             0x04
-#define SPCv_MSGU_CFG_TABLE_UNFREEZE           0x08
+#define SPCv_MSGU_CFG_TABLE_UPDATE             0x001
+#define SPCv_MSGU_CFG_TABLE_RESET              0x002
+#define SPCv_MSGU_CFG_TABLE_FREEZE             0x004
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE           0x008
 #define MSGU_IBDB_SET                          0x00
 #define MSGU_HOST_INT_STATUS                   0x08
 #define MSGU_HOST_INT_MASK                     0x0C
@@ -1520,4 +1525,6 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE            0x06
 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID           0x07
 
+
+#define MEMBASE_II_SHIFT_REGISTER       0x1010
 #endif
index 41327d46ecf58f9003686429189fdc3ad6e218b8..084d1fd59c9e137e337c663ff076c488957d731a 100644 (file)
@@ -306,6 +306,7 @@ struct ddb_entry {
 struct qla_ddb_index {
        struct list_head list;
        uint16_t fw_ddb_idx;
+       uint16_t flash_ddb_idx;
        struct dev_db_entry fw_ddb;
        uint8_t flash_isid[6];
 };
index 51d1a70f8b452517f4f5fcfb62d903746d08952f..1243e5942b76dfe3883e38303937f0688f22c199 100644 (file)
@@ -539,6 +539,10 @@ struct qla_flt_region {
 #define ENABLE_INTERNAL_LOOPBACK       0x04
 #define ENABLE_EXTERNAL_LOOPBACK       0x08
 
+/* generic defines to enable/disable params */
+#define QL4_PARAM_DISABLE      0
+#define QL4_PARAM_ENABLE       1
+
 /*************************************************************************/
 
 /* Host Adapter Initialization Control Block (from host) */
index e6f2a2669dbd2746f79c95b32fa5973b75595e0f..5cef2527180aaf1541962a5c1caec1ab850d1a48 100644 (file)
@@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
 int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
                           char *password, int bidi, uint16_t *chap_index);
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+                    uint16_t idx, int bidi);
 
 void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
 void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
index 8503ad643bdda896e9f7d70aa2c752762c3f2b0f..655b7bb644d9bc1ad93d5794a708ae2ad610aca1 100644 (file)
@@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha)
        __qla4xxx_disable_intrs(ha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
+
+static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
+{
+       int type;
+
+       if (chap_entry->flags & BIT_7)
+               type = LOCAL_CHAP;
+       else
+               type = BIDI_CHAP;
+
+       return type;
+}
index 62d4208af21f4a81ac546b26e0a95a90c4af82cf..22cbd005bdf497c7ca5b011e8e21a607041c1c84 100644 (file)
@@ -1530,13 +1530,26 @@ exit_get_chap:
        return ret;
 }
 
-static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
-                           char *password, uint16_t idx, int bidi)
+/**
+ * qla4xxx_set_chap - Make a chap entry at the given index
+ * @ha: pointer to adapter structure
+ * @username: CHAP username to set
+ * @password: CHAP password to set
+ * @idx: CHAP index at which to make the entry
+ * @bidi: type of chap entry (chap_in or chap_out)
+ *
+ * Create chap entry at the given index with the information provided.
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
+                    uint16_t idx, int bidi)
 {
        int ret = 0;
        int rval = QLA_ERROR;
        uint32_t offset = 0;
        struct ql4_chap_table *chap_table;
+       uint32_t chap_size = 0;
        dma_addr_t chap_dma;
 
        chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
@@ -1554,7 +1567,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
        strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
        strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
        chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
-       offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
+
+       if (is_qla40XX(ha)) {
+               chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
+               offset = FLASH_CHAP_OFFSET;
+       } else { /* Single region contains CHAP info for both ports which is
+                 * divided into half for each port.
+                 */
+               chap_size = ha->hw.flt_chap_size / 2;
+               offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
+               if (ha->port_num == 1)
+                       offset += chap_size;
+       }
+
+       offset += (idx * sizeof(struct ql4_chap_table));
        rval = qla4xxx_set_flash(ha, chap_dma, offset,
                                sizeof(struct ql4_chap_table),
                                FLASH_OPT_RMW_COMMIT);
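The rewritten offset calculation distinguishes the two adapter families: ISP40xx keeps its CHAP table at the fixed FLASH_CHAP_OFFSET, while the newer parts store one CHAP region for both ports in the flash layout table and give each port half of it, so port 1 starts chap_size bytes in; the per-entry offset idx * sizeof(struct ql4_chap_table) is added in either case. Condensed into an illustrative helper (not part of the driver):

/* Illustrative only: flash offset of CHAP entry 'idx' for this port. */
static uint32_t chap_entry_flash_offset(struct scsi_qla_host *ha, uint16_t idx)
{
	uint32_t offset;

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		/* one region shared by both ports, split in half */
		uint32_t chap_size = ha->hw.flt_chap_size / 2;

		offset = FLASH_RAW_ACCESS_ADDR +
			 (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += chap_size;
	}

	return offset + idx * sizeof(struct ql4_chap_table);
}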
@@ -1611,7 +1637,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
                goto exit_unlock_uni_chap;
        }
 
-       if (!(chap_table->flags & BIT_6)) {
+       if (!(chap_table->flags & BIT_7)) {
                ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
                rval = QLA_ERROR;
                goto exit_unlock_uni_chap;
index 1be6cefc390b9933432de61466d2d029e4f232ae..a28d5e624aabcdd729e4c64c4c904549e21d6e17 100644 (file)
@@ -149,6 +149,8 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
                                 uint32_t *num_entries, char *buf);
 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void  *data,
+                                 int len);
 
 /*
  * SCSI host template entry points
@@ -252,6 +254,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
        .send_ping              = qla4xxx_send_ping,
        .get_chap               = qla4xxx_get_chap_list,
        .delete_chap            = qla4xxx_delete_chap,
+       .set_chap               = qla4xxx_set_chap_entry,
        .get_flashnode_param    = qla4xxx_sysfs_ddb_get_param,
        .set_flashnode_param    = qla4xxx_sysfs_ddb_set_param,
        .new_flashnode          = qla4xxx_sysfs_ddb_add,
@@ -508,6 +511,95 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
        return 0;
 }
 
+static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
+                                    int16_t chap_index,
+                                    struct ql4_chap_table **chap_entry)
+{
+       int rval = QLA_ERROR;
+       int max_chap_entries;
+
+       if (!ha->chap_list) {
+               ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+               rval = QLA_ERROR;
+               goto exit_get_chap;
+       }
+
+       if (is_qla80XX(ha))
+               max_chap_entries = (ha->hw.flt_chap_size / 2) /
+                                  sizeof(struct ql4_chap_table);
+       else
+               max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+       if (chap_index > max_chap_entries) {
+               ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+               rval = QLA_ERROR;
+               goto exit_get_chap;
+       }
+
+       *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
+       if ((*chap_entry)->cookie !=
+            __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+               rval = QLA_ERROR;
+               *chap_entry = NULL;
+       } else {
+               rval = QLA_SUCCESS;
+       }
+
+exit_get_chap:
+       return rval;
+}
+
+/**
+ * qla4xxx_find_free_chap_index - Find the first free chap index
+ * @ha: pointer to adapter structure
+ * @chap_index: CHAP index to be returned
+ *
+ * Find the first free chap index available in the chap table
+ *
+ * Note: Caller should acquire the chap lock before getting here.
+ **/
+static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
+                                       uint16_t *chap_index)
+{
+       int i, rval;
+       int free_index = -1;
+       int max_chap_entries = 0;
+       struct ql4_chap_table *chap_table;
+
+       if (is_qla80XX(ha))
+               max_chap_entries = (ha->hw.flt_chap_size / 2) /
+                                               sizeof(struct ql4_chap_table);
+       else
+               max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+       if (!ha->chap_list) {
+               ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
+               rval = QLA_ERROR;
+               goto exit_find_chap;
+       }
+
+       for (i = 0; i < max_chap_entries; i++) {
+               chap_table = (struct ql4_chap_table *)ha->chap_list + i;
+
+               if ((chap_table->cookie !=
+                   __constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
+                  (i > MAX_RESRV_CHAP_IDX)) {
+                               free_index = i;
+                               break;
+               }
+       }
+
+       if (free_index != -1) {
+               *chap_index = free_index;
+               rval = QLA_SUCCESS;
+       } else {
+               rval = QLA_ERROR;
+       }
+
+exit_find_chap:
+       return rval;
+}
+
 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
                                  uint32_t *num_entries, char *buf)
 {
@@ -691,6 +783,111 @@ exit_delete_chap:
        return ret;
 }
 
+/**
+ * qla4xxx_set_chap_entry - Make chap entry with given information
+ * @shost: pointer to host
+ * @data: chap info - credentials, index and type to make chap entry
+ * @len: length of data
+ *
+ * Add or update chap entry with the given information
+ **/
+static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
+{
+       struct scsi_qla_host *ha = to_qla_host(shost);
+       struct iscsi_chap_rec chap_rec;
+       struct ql4_chap_table *chap_entry = NULL;
+       struct iscsi_param_info *param_info;
+       struct nlattr *attr;
+       int max_chap_entries = 0;
+       int type;
+       int rem = len;
+       int rc = 0;
+
+       memset(&chap_rec, 0, sizeof(chap_rec));
+
+       nla_for_each_attr(attr, data, len, rem) {
+               param_info = nla_data(attr);
+
+               switch (param_info->param) {
+               case ISCSI_CHAP_PARAM_INDEX:
+                       chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
+                       break;
+               case ISCSI_CHAP_PARAM_CHAP_TYPE:
+                       chap_rec.chap_type = param_info->value[0];
+                       break;
+               case ISCSI_CHAP_PARAM_USERNAME:
+                       memcpy(chap_rec.username, param_info->value,
+                              param_info->len);
+                       break;
+               case ISCSI_CHAP_PARAM_PASSWORD:
+                       memcpy(chap_rec.password, param_info->value,
+                              param_info->len);
+                       break;
+               case ISCSI_CHAP_PARAM_PASSWORD_LEN:
+                       chap_rec.password_length = param_info->value[0];
+                       break;
+               default:
+                       ql4_printk(KERN_ERR, ha,
+                                  "%s: No such sysfs attribute\n", __func__);
+                       rc = -ENOSYS;
+                       goto exit_set_chap;
+               };
+       }
+
+       if (chap_rec.chap_type == CHAP_TYPE_IN)
+               type = BIDI_CHAP;
+       else
+               type = LOCAL_CHAP;
+
+       if (is_qla80XX(ha))
+               max_chap_entries = (ha->hw.flt_chap_size / 2) /
+                                  sizeof(struct ql4_chap_table);
+       else
+               max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+       mutex_lock(&ha->chap_sem);
+       if (chap_rec.chap_tbl_idx < max_chap_entries) {
+               rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
+                                              &chap_entry);
+               if (!rc) {
+                       if (!(type == qla4xxx_get_chap_type(chap_entry))) {
+                               ql4_printk(KERN_INFO, ha,
+                                          "Type mismatch for CHAP entry %d\n",
+                                          chap_rec.chap_tbl_idx);
+                               rc = -EINVAL;
+                               goto exit_unlock_chap;
+                       }
+
+                       /* If chap index is in use then don't modify it */
+                       rc = qla4xxx_is_chap_active(shost,
+                                                   chap_rec.chap_tbl_idx);
+                       if (rc) {
+                               ql4_printk(KERN_INFO, ha,
+                                          "CHAP entry %d is in use\n",
+                                          chap_rec.chap_tbl_idx);
+                               rc = -EBUSY;
+                               goto exit_unlock_chap;
+                       }
+               }
+       } else {
+               rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
+               if (rc) {
+                       ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
+                       rc = -EBUSY;
+                       goto exit_unlock_chap;
+               }
+       }
+
+       rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
+                             chap_rec.chap_tbl_idx, type);
+
+exit_unlock_chap:
+       mutex_unlock(&ha->chap_sem);
+
+exit_set_chap:
+       return rc;
+}
+
 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
                                   enum iscsi_param_type param_type,
                                   int param, char *buf)
@@ -1455,9 +1652,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
        struct iscsi_session *sess = cls_sess->dd_data;
        struct ddb_entry *ddb_entry = sess->dd_data;
        struct scsi_qla_host *ha = ddb_entry->ha;
+       struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
+       struct ql4_chap_table chap_tbl;
        int rval, len;
        uint16_t idx;
 
+       memset(&chap_tbl, 0, sizeof(chap_tbl));
        switch (param) {
        case ISCSI_PARAM_CHAP_IN_IDX:
                rval = qla4xxx_get_chap_index(ha, sess->username_in,
@@ -1469,14 +1669,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
                        len = sprintf(buf, "%hu\n", idx);
                break;
        case ISCSI_PARAM_CHAP_OUT_IDX:
-               rval = qla4xxx_get_chap_index(ha, sess->username,
-                                             sess->password, LOCAL_CHAP,
-                                             &idx);
+               if (ddb_entry->ddb_type == FLASH_DDB) {
+                       if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+                               idx = ddb_entry->chap_tbl_idx;
+                               rval = QLA_SUCCESS;
+                       } else {
+                               rval = QLA_ERROR;
+                       }
+               } else {
+                       rval = qla4xxx_get_chap_index(ha, sess->username,
+                                                     sess->password,
+                                                     LOCAL_CHAP, &idx);
+               }
                if (rval)
                        len = sprintf(buf, "\n");
                else
                        len = sprintf(buf, "%hu\n", idx);
                break;
+       case ISCSI_PARAM_USERNAME:
+       case ISCSI_PARAM_PASSWORD:
+               /* First, populate session username and password for FLASH DDB,
+                * if not already done. This happens when session login fails
+                * for a FLASH DDB.
+                */
+               if (ddb_entry->ddb_type == FLASH_DDB &&
+                   ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
+                   !sess->username && !sess->password) {
+                       idx = ddb_entry->chap_tbl_idx;
+                       rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+                                                           chap_tbl.secret,
+                                                           idx);
+                       if (!rval) {
+                               iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+                                               (char *)chap_tbl.name,
+                                               strlen((char *)chap_tbl.name));
+                               iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+                                               (char *)chap_tbl.secret,
+                                               chap_tbl.secret_len);
+                       }
+               }
+               /* allow fall-through */
        default:
                return iscsi_session_get_param(cls_sess, param, buf);
        }
@@ -2373,11 +2605,6 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
        COPY_ISID(sess->isid, fw_ddb_entry->isid);
 
        ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
-       if (ddb_link < MAX_DDB_ENTRIES)
-               sess->discovery_parent_idx = ddb_link;
-       else
-               sess->discovery_parent_idx = DDB_NO_LINK;
-
        if (ddb_link == DDB_ISNS)
                disc_parent = ISCSI_DISC_PARENT_ISNS;
        else if (ddb_link == DDB_NO_LINK)
@@ -2402,6 +2629,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
        int buflen = 0;
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
+       struct ql4_chap_table chap_tbl;
        struct iscsi_conn *conn;
        char ip_addr[DDB_IPADDR_LEN];
        uint16_t options = 0;
@@ -2409,6 +2637,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        conn = cls_conn->dd_data;
+       memset(&chap_tbl, 0, sizeof(chap_tbl));
 
        ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
 
@@ -2435,6 +2664,19 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
                        (char *)fw_ddb_entry->iscsi_name, buflen);
        iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
                        (char *)ha->name_string, buflen);
+
+       if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
+               if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
+                                                  chap_tbl.secret,
+                                                  ddb_entry->chap_tbl_idx)) {
+                       iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
+                                       (char *)chap_tbl.name,
+                                       strlen((char *)chap_tbl.name));
+                       iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
+                                       (char *)chap_tbl.secret,
+                                       chap_tbl.secret_len);
+               }
+       }
 }
 
 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -4937,7 +5179,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
 }
 
 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
-                                    struct dev_db_entry *fw_ddb_entry)
+                                    struct dev_db_entry *fw_ddb_entry,
+                                    uint32_t *index)
 {
        struct ddb_entry *ddb_entry;
        struct ql4_tuple_ddb *fw_tddb = NULL;
@@ -4971,6 +5214,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
                qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
                if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
                        ret = QLA_SUCCESS; /* found */
+                       if (index != NULL)
+                               *index = idx;
                        goto exit_check;
                }
        }
@@ -5206,6 +5451,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
        ddb_entry->ha = ha;
        ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
        ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+       ddb_entry->chap_tbl_idx = INVALID_ENTRY;
 
        atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
        atomic_set(&ddb_entry->relogin_timer, 0);
@@ -5267,6 +5513,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
        } while (time_after(wtime, jiffies));
 }
 
+static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
+                                 struct dev_db_entry *flash_ddb_entry)
+{
+       uint16_t options = 0;
+       size_t ip_len = IP_ADDR_LEN;
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               ip_len = IPv6_ADDR_LEN;
+
+       if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
+               return QLA_ERROR;
+
+       if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
+                  sizeof(fw_ddb_entry->isid)))
+               return QLA_ERROR;
+
+       if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
+                  sizeof(fw_ddb_entry->port)))
+               return QLA_ERROR;
+
+       return QLA_SUCCESS;
+}
+
+static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry,
+                                    uint32_t fw_idx, uint32_t *flash_index)
+{
+       struct dev_db_entry *flash_ddb_entry;
+       dma_addr_t flash_ddb_entry_dma;
+       uint32_t idx = 0;
+       int max_ddbs;
+       int ret = QLA_ERROR, status;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                        &flash_ddb_entry_dma);
+       if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
+               ql4_printk(KERN_ERR, ha, "Out of memory\n");
+               goto exit_find_st_idx;
+       }
+
+       status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+                                         flash_ddb_entry_dma, fw_idx);
+       if (status == QLA_SUCCESS) {
+               status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+               if (status == QLA_SUCCESS) {
+                       *flash_index = fw_idx;
+                       ret = QLA_SUCCESS;
+                       goto exit_find_st_idx;
+               }
+       }
+
+       for (idx = 0; idx < max_ddbs; idx++) {
+               status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
+                                                 flash_ddb_entry_dma, idx);
+               if (status == QLA_ERROR)
+                       continue;
+
+               status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
+               if (status == QLA_SUCCESS) {
+                       *flash_index = idx;
+                       ret = QLA_SUCCESS;
+                       goto exit_find_st_idx;
+               }
+       }
+
+       if (idx == max_ddbs)
+               ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
+                          fw_idx);
+
+exit_find_st_idx:
+       if (flash_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
+                             flash_ddb_entry_dma);
+
+       return ret;
+}
+
 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
                                  struct list_head *list_st)
 {
@@ -5278,6 +5605,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
+       uint32_t flash_index = -1;
        uint16_t conn_id = 0;
 
        fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5310,6 +5638,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
                if (!st_ddb_idx)
                        break;
 
+               ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
+                                               &flash_index);
+               if (ret == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha,
+                                  "No flash entry for ST at idx [%d]\n", idx);
+                       st_ddb_idx->flash_ddb_idx = idx;
+               } else {
+                       ql4_printk(KERN_INFO, ha,
+                                  "ST at idx [%d] is stored at flash [%d]\n",
+                                  idx, flash_index);
+                       st_ddb_idx->flash_ddb_idx = flash_index;
+               }
+
                st_ddb_idx->fw_ddb_idx = idx;
 
                list_add_tail(&st_ddb_idx->list, list_st);
@@ -5354,6 +5695,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
        }
 }
 
+static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
+                                        struct ddb_entry *ddb_entry,
+                                        struct dev_db_entry *fw_ddb_entry)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       uint32_t max_ddbs = 0;
+       uint16_t ddb_link = -1;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       cls_sess = ddb_entry->sess;
+       sess = cls_sess->dd_data;
+
+       ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+       if (ddb_link < max_ddbs)
+               sess->discovery_parent_idx = ddb_link;
+       else
+               sess->discovery_parent_idx = DDB_NO_LINK;
+}
+
 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
                                   struct dev_db_entry *fw_ddb_entry,
                                   int is_reset, uint16_t idx)
@@ -5418,6 +5781,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
 
        /* Update sess/conn params */
        qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+       qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
 
        if (is_reset == RESET_ADAPTER) {
                iscsi_block_session(cls_sess);
@@ -5434,17 +5798,43 @@ exit_setup:
        return ret;
 }
 
+static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
+                                      struct list_head *list_ddb,
+                                      struct dev_db_entry *fw_ddb_entry)
+{
+       struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+       uint16_t ddb_link;
+
+       ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+
+       list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+               if (ddb_idx->fw_ddb_idx == ddb_link) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Updating NT parent idx from [%d] to [%d]\n",
+                                         ddb_link, ddb_idx->flash_ddb_idx));
+                       fw_ddb_entry->ddb_link =
+                                           cpu_to_le16(ddb_idx->flash_ddb_idx);
+                       return;
+               }
+       }
+}
+
 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
-                                 struct list_head *list_nt, int is_reset)
+                                 struct list_head *list_nt,
+                                 struct list_head *list_st,
+                                 int is_reset)
 {
        struct dev_db_entry *fw_ddb_entry;
+       struct ddb_entry *ddb_entry = NULL;
        dma_addr_t fw_ddb_dma;
        int max_ddbs;
        int fw_idx_size;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
+       uint32_t ddb_idx = -1;
        uint16_t conn_id = 0;
+       uint16_t ddb_link = -1;
        struct qla_ddb_index  *nt_ddb_idx;
 
        fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@@ -5471,12 +5861,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
                if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
                        goto continue_next_nt;
 
+               ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+               if (ddb_link < max_ddbs)
+                       qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
+
                if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
-                   state == DDB_DS_SESSION_FAILED))
+                   state == DDB_DS_SESSION_FAILED) &&
+                   (is_reset == INIT_ADAPTER))
                        goto continue_next_nt;
 
                DEBUG2(ql4_printk(KERN_INFO, ha,
                                  "Adding  DDB to session = 0x%x\n", idx));
+
                if (is_reset == INIT_ADAPTER) {
                        nt_ddb_idx = vmalloc(fw_idx_size);
                        if (!nt_ddb_idx)
@@ -5506,9 +5902,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
 
                        list_add_tail(&nt_ddb_idx->list, list_nt);
                } else if (is_reset == RESET_ADAPTER) {
-                       if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
-                                                               QLA_SUCCESS)
+                       ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
+                                                       &ddb_idx);
+                       if (ret == QLA_SUCCESS) {
+                               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
+                                                                      ddb_idx);
+                               if (ddb_entry != NULL)
+                                       qla4xxx_update_sess_disc_idx(ha,
+                                                                    ddb_entry,
+                                                                 fw_ddb_entry);
                                goto continue_next_nt;
+                       }
                }
 
                ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
@@ -5526,7 +5930,8 @@ exit_nt_list:
 }
 
 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
-                                     struct list_head *list_nt)
+                                     struct list_head *list_nt,
+                                     uint16_t target_id)
 {
        struct dev_db_entry *fw_ddb_entry;
        dma_addr_t fw_ddb_dma;
@@ -5571,13 +5976,16 @@ static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
 
                nt_ddb_idx->fw_ddb_idx = idx;
 
-               ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+               ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
                if (ret == QLA_SUCCESS) {
                        /* free nt_ddb_idx and do not add to list_nt */
                        vfree(nt_ddb_idx);
                        goto continue_next_new_nt;
                }
 
+               if (target_id < max_ddbs)
+                       fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
+
                list_add_tail(&nt_ddb_idx->list, list_nt);
 
                ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
@@ -5894,7 +6302,8 @@ exit_ddb_conn_open:
 }
 
 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
-                               struct dev_db_entry *fw_ddb_entry)
+                               struct dev_db_entry *fw_ddb_entry,
+                               uint16_t target_id)
 {
        struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
        struct list_head list_nt;
@@ -5919,7 +6328,7 @@ static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
        if (ret == QLA_ERROR)
                goto exit_login_st;
 
-       qla4xxx_build_new_nt_list(ha, &list_nt);
+       qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
 
        list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
                list_del_init(&ddb_idx->list);
@@ -5946,7 +6355,7 @@ static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
 {
        int ret = QLA_ERROR;
 
-       ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+       ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
        if (ret != QLA_SUCCESS)
                ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
                                              idx);
@@ -6001,7 +6410,8 @@ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
        fw_ddb_entry->cookie = DDB_VALID_COOKIE;
 
        if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
-               ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
+               ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
+                                          fnode_sess->target_id);
        else
                ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
                                           fnode_sess->target_id);
@@ -6522,10 +6932,13 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
        struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
        struct scsi_qla_host *ha = to_qla_host(shost);
        struct iscsi_flashnode_param_info *fnode_param;
+       struct ql4_chap_table chap_tbl;
        struct nlattr *attr;
+       uint16_t chap_out_idx = INVALID_ENTRY;
        int rc = QLA_ERROR;
        uint32_t rem = len;
 
+       memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
        nla_for_each_attr(attr, data, len, rem) {
                fnode_param = nla_data(attr);
 
@@ -6567,6 +6980,10 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
                        break;
                case ISCSI_FLASHNODE_CHAP_AUTH_EN:
                        fnode_sess->chap_auth_en = fnode_param->value[0];
+                       /* Invalidate chap index if chap auth is disabled */
+                       if (!fnode_sess->chap_auth_en)
+                               fnode_sess->chap_out_idx = INVALID_ENTRY;
+
                        break;
                case ISCSI_FLASHNODE_SNACK_REQ_EN:
                        fnode_conn->snack_req_en = fnode_param->value[0];
@@ -6705,6 +7122,17 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
                        fnode_conn->exp_statsn =
                                                *(uint32_t *)fnode_param->value;
                        break;
+               case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+                       chap_out_idx = *(uint16_t *)fnode_param->value;
+                       if (!qla4xxx_get_uni_chap_at_index(ha,
+                                                          chap_tbl.name,
+                                                          chap_tbl.secret,
+                                                          chap_out_idx)) {
+                               fnode_sess->chap_out_idx = chap_out_idx;
+                               /* Enable chap auth if chap index is valid */
+                               fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
+                       }
+                       break;
                default:
                        ql4_printk(KERN_ERR, ha,
                                   "%s: No such sysfs attribute\n", __func__);
@@ -6926,11 +7354,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
                schedule_timeout_uninterruptible(HZ / 10);
        } while (time_after(wtime, jiffies));
 
-       /* Free up the sendtargets list */
-       qla4xxx_free_ddb_list(&list_st);
 
-       qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+       qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
 
+       qla4xxx_free_ddb_list(&list_st);
        qla4xxx_free_ddb_list(&list_nt);
 
        qla4xxx_free_ddb_index(ha);
index 01c0ffa31276296a985d1381d53e1fc7ac069e80..80b8b10edf4186bbbca2224c041b78fd5b0c8568 100644 (file)
@@ -169,7 +169,7 @@ static int scsi_debug_dix = DEF_DIX;
 static int scsi_debug_dsense = DEF_D_SENSE;
 static int scsi_debug_every_nth = DEF_EVERY_NTH;
 static int scsi_debug_fake_rw = DEF_FAKE_RW;
-static int scsi_debug_guard = DEF_GUARD;
+static unsigned int scsi_debug_guard = DEF_GUARD;
 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
 static int scsi_debug_max_luns = DEF_MAX_LUNS;
 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
@@ -293,6 +293,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
                                   0, 0, 0x0, 0x0};
 
+static void *fake_store(unsigned long long lba)
+{
+       lba = do_div(lba, sdebug_store_sectors);
+
+       return fake_storep + lba * scsi_debug_sector_size;
+}
+
+static struct sd_dif_tuple *dif_store(sector_t sector)
+{
+       sector = do_div(sector, sdebug_store_sectors);
+
+       return dif_storep + sector;
+}
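fake_store() and dif_store() fold an arbitrary LBA or sector back into the simulated backing store. The idiom worth noting is do_div(): it divides its first argument in place (leaving the quotient there) and evaluates to the remainder, so assigning the result back yields lba % sdebug_store_sectors without a 64-bit modulo on 32-bit hosts. The same operation, spelled out as an illustrative one-liner:

/* Illustrative only: what both helpers compute before indexing the store. */
static unsigned long long wrap_lba(unsigned long long lba)
{
	return do_div(lba, sdebug_store_sectors);	/* lba % store size */
}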
+
 static int sdebug_add_adapter(void);
 static void sdebug_remove_adapter(void);
 
@@ -1731,25 +1745,22 @@ static int do_device_access(struct scsi_cmnd *scmd,
        return ret;
 }
 
-static u16 dif_compute_csum(const void *buf, int len)
+static __be16 dif_compute_csum(const void *buf, int len)
 {
-       u16 csum;
+       __be16 csum;
 
-       switch (scsi_debug_guard) {
-       case 1:
-               csum = ip_compute_csum(buf, len);
-               break;
-       case 0:
+       if (scsi_debug_guard)
+               csum = (__force __be16)ip_compute_csum(buf, len);
+       else
                csum = cpu_to_be16(crc_t10dif(buf, len));
-               break;
-       }
+
        return csum;
 }
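dif_compute_csum() now returns the guard tag as a big-endian value and selects the algorithm from scsi_debug_guard: a non-zero setting means the IP-checksum guard, zero means CRC-T10DIF, and dif_verify() below compares the result directly against sdt->guard_tag. A compact sketch of that selection, assuming <linux/crc-t10dif.h> and <net/checksum.h> (illustrative helper, not the driver function):

static __be16 guard_tag_for(const void *buf, int len, bool ip_guard)
{
	if (ip_guard)
		return (__force __be16)ip_compute_csum(buf, len);

	return cpu_to_be16(crc_t10dif(buf, len));
}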
 
 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
                      sector_t sector, u32 ei_lba)
 {
-       u16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+       __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
 
        if (sdt->guard_tag != csum) {
                pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@@ -1775,59 +1786,71 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
        return 0;
 }
 
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
-                           unsigned int sectors, u32 ei_lba)
+static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+                         unsigned int sectors, bool read)
 {
        unsigned int i, resid;
        struct scatterlist *psgl;
-       struct sd_dif_tuple *sdt;
-       sector_t sector;
-       sector_t tmp_sec = start_sec;
        void *paddr;
+       const void *dif_store_end = dif_storep + sdebug_store_sectors;
 
-       start_sec = do_div(tmp_sec, sdebug_store_sectors);
+       /* Bytes of protection data to copy into sgl */
+       resid = sectors * sizeof(*dif_storep);
 
-       sdt = dif_storep + start_sec;
+       scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
+               int len = min(psgl->length, resid);
+               void *start = dif_store(sector);
+               int rest = 0;
 
-       for (i = 0 ; i < sectors ; i++) {
-               int ret;
+               if (dif_store_end < start + len)
+                       rest = start + len - dif_store_end;
 
-               if (sdt[i].app_tag == 0xffff)
-                       continue;
+               paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
 
-               sector = start_sec + i;
+               if (read)
+                       memcpy(paddr, start, len - rest);
+               else
+                       memcpy(start, paddr, len - rest);
 
-               ret = dif_verify(&sdt[i],
-                                fake_storep + sector * scsi_debug_sector_size,
-                                sector, ei_lba);
-               if (ret) {
-                       dif_errors++;
-                       return ret;
+               if (rest) {
+                       if (read)
+                               memcpy(paddr + len - rest, dif_storep, rest);
+                       else
+                               memcpy(dif_storep, paddr + len - rest, rest);
                }
 
-               ei_lba++;
+               sector += len / sizeof(*dif_storep);
+               resid -= len;
+               kunmap_atomic(paddr);
        }
+}
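dif_copy_prot() factors the protection-information copy into one place and handles wrap-around explicitly: when the run of tuples starting at dif_store(sector) would cross the end of dif_storep, the copy is split and the remainder continues from the start of the store. The same split-copy idea on a plain ring buffer, as a hedged sketch (ring_copy_out() is hypothetical, not part of the driver):

/* Hypothetical ring-buffer copy illustrating the split above: copy 'len'
 * bytes starting at offset 'pos' of a 'size'-byte store into 'dst', wrapping
 * to the beginning of the store when the end is reached.
 */
static void ring_copy_out(void *dst, const void *store, size_t size,
			  size_t pos, size_t len)
{
	const char *src = store;
	size_t first = min(len, size - pos);	/* bytes before the wrap */

	memcpy(dst, src + pos, first);
	if (len > first)			/* wrapped remainder, if any */
		memcpy((char *)dst + first, src, len - first);
}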
 
-       /* Bytes of protection data to copy into sgl */
-       resid = sectors * sizeof(*dif_storep);
-       sector = start_sec;
+static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+                           unsigned int sectors, u32 ei_lba)
+{
+       unsigned int i;
+       struct sd_dif_tuple *sdt;
+       sector_t sector;
 
-       scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
-               int len = min(psgl->length, resid);
+       for (i = 0; i < sectors; i++) {
+               int ret;
 
-               paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
-               memcpy(paddr, dif_storep + sector, len);
+               sector = start_sec + i;
+               sdt = dif_store(sector);
 
-               sector += len / sizeof(*dif_storep);
-               if (sector >= sdebug_store_sectors) {
-                       /* Force wrap */
-                       tmp_sec = sector;
-                       sector = do_div(tmp_sec, sdebug_store_sectors);
+               if (sdt->app_tag == cpu_to_be16(0xffff))
+                       continue;
+
+               ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+               if (ret) {
+                       dif_errors++;
+                       return ret;
                }
-               resid -= len;
-               kunmap_atomic(paddr);
+
+               ei_lba++;
        }
 
+       dif_copy_prot(SCpnt, start_sec, sectors, true);
        dix_reads++;
 
        return 0;
@@ -1910,15 +1933,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 {
        int i, j, ret;
        struct sd_dif_tuple *sdt;
-       struct scatterlist *dsgl = scsi_sglist(SCpnt);
+       struct scatterlist *dsgl;
        struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
        void *daddr, *paddr;
-       sector_t tmp_sec = start_sec;
-       sector_t sector;
+       sector_t sector = start_sec;
        int ppage_offset;
 
-       sector = do_div(tmp_sec, sdebug_store_sectors);
-
        BUG_ON(scsi_sg_count(SCpnt) == 0);
        BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
 
@@ -1946,25 +1966,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 
                        sdt = paddr + ppage_offset;
 
-                       ret = dif_verify(sdt, daddr + j, start_sec, ei_lba);
+                       ret = dif_verify(sdt, daddr + j, sector, ei_lba);
                        if (ret) {
                                dump_sector(daddr + j, scsi_debug_sector_size);
                                goto out;
                        }
 
-                       /* Would be great to copy this in bigger
-                        * chunks.  However, for the sake of
-                        * correctness we need to verify each sector
-                        * before writing it to "stable" storage
-                        */
-                       memcpy(dif_storep + sector, sdt, sizeof(*sdt));
-
                        sector++;
-
-                       if (sector == sdebug_store_sectors)
-                               sector = 0;     /* Force wrap */
-
-                       start_sec++;
                        ei_lba++;
                        ppage_offset += sizeof(struct sd_dif_tuple);
                }
@@ -1973,6 +1981,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
                kunmap_atomic(daddr);
        }
 
+       dif_copy_prot(SCpnt, start_sec, sectors, false);
        dix_writes++;
 
        return 0;
@@ -2742,7 +2751,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
@@ -3172,7 +3181,7 @@ DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
 
 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
 {
-       return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
 }
 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
 
index 4c5aabe21755cb82450876666145f1f2add7ad1f..af4c050ce6e4e96179f586aae4c54690229b9705 100644 (file)
@@ -54,7 +54,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
                /*
                 * All the high-level SCSI drivers that implement runtime
                 * PM treat runtime suspend, system suspend, and system
-                * hibernate identically.
+                * hibernate nearly identically. In all cases the requirements
+                * for runtime suspension are stricter.
                 */
                if (pm_runtime_suspended(dev))
                        return 0;
index 40c639491b27ce5857a7b28f4237d8b33800f6df..a73471074a02defa3f50733a4a0ce315da85dc29 100644 (file)
@@ -529,6 +529,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)
  */
 sdev_rd_attr (device_blocked, "%d\n");
 sdev_rd_attr (queue_depth, "%d\n");
+sdev_rd_attr (device_busy, "%d\n");
 sdev_rd_attr (type, "%d\n");
 sdev_rd_attr (scsi_level, "%d\n");
 sdev_rd_attr (vendor, "%.8s\n");
@@ -750,6 +751,7 @@ static struct attribute *scsi_sdev_attrs[] = {
        &dev_attr_device_blocked.attr,
        &dev_attr_type.attr,
        &dev_attr_scsi_level.attr,
+       &dev_attr_device_busy.attr,
        &dev_attr_vendor.attr,
        &dev_attr_model.attr,
        &dev_attr_rev.attr,
index e4a989fa477d3bc3f94788455dec8f0b6ba2ff5d..63a6ca49d4e5e8d1b0f53c592943c279bcffcb60 100644 (file)
@@ -2744,6 +2744,28 @@ exit_get_chap:
        return err;
 }
 
+static int iscsi_set_chap(struct iscsi_transport *transport,
+                         struct iscsi_uevent *ev, uint32_t len)
+{
+       char *data = (char *)ev + sizeof(*ev);
+       struct Scsi_Host *shost;
+       int err = 0;
+
+       if (!transport->set_chap)
+               return -ENOSYS;
+
+       shost = scsi_host_lookup(ev->u.set_path.host_no);
+       if (!shost) {
+               pr_err("%s could not find host no %u\n",
+                      __func__, ev->u.set_path.host_no);
+               return -ENODEV;
+       }
+
+       err = transport->set_chap(shost, data, len);
+       scsi_host_put(shost);
+       return err;
+}
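iscsi_set_chap() is the netlink dispatcher for the new ISCSI_UEVENT_SET_CHAP event: the CHAP payload immediately follows the iscsi_uevent header, the host is looked up from ev->u.set_path.host_no, and the transport's ->set_chap() callback does the real work; transports without one get -ENOSYS. A minimal, hypothetical LLD-side stub showing how the new hook is wired up (the 'example_' names are placeholders, not the qla4xxx implementation):

/* Hypothetical ->set_chap() implementation: a real driver would decode the
 * 'len' bytes of CHAP parameters in 'data' and program the HBA's CHAP table.
 */
static int example_set_chap(struct Scsi_Host *shost, void *data, int len)
{
	if (!data || len <= 0)
		return -EINVAL;

	/* ... decode parameters and update the adapter's CHAP entry ... */
	return 0;
}

static struct iscsi_transport example_iscsi_transport = {
	.name		= "example",
	.set_chap	= example_set_chap,
	/* the other transport callbacks are omitted from this sketch */
};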
+
 static int iscsi_delete_chap(struct iscsi_transport *transport,
                             struct iscsi_uevent *ev)
 {
@@ -3234,6 +3256,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
        case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
                err = iscsi_logout_flashnode_sid(transport, ev);
                break;
+       case ISCSI_UEVENT_SET_CHAP:
+               err = iscsi_set_chap(transport, ev,
+                                    nlmsg_attrlen(nlh, sizeof(*ev)));
+               break;
        default:
                err = -ENOSYS;
                break;
index 5693f6d7eddb8b20cd9b3e0374a4e73e60ab7aa2..fd874b846dc12e6b43c830ea9167957f4e667ca0 100644 (file)
@@ -105,7 +105,8 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
 static int  sd_probe(struct device *);
 static int  sd_remove(struct device *);
 static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *);
+static int sd_suspend_system(struct device *);
+static int sd_suspend_runtime(struct device *);
 static int sd_resume(struct device *);
 static void sd_rescan(struct device *);
 static int sd_done(struct scsi_cmnd *);
@@ -484,11 +485,11 @@ static struct class sd_disk_class = {
 };
 
 static const struct dev_pm_ops sd_pm_ops = {
-       .suspend                = sd_suspend,
+       .suspend                = sd_suspend_system,
        .resume                 = sd_resume,
-       .poweroff               = sd_suspend,
+       .poweroff               = sd_suspend_system,
        .restore                = sd_resume,
-       .runtime_suspend        = sd_suspend,
+       .runtime_suspend        = sd_suspend_runtime,
        .runtime_resume         = sd_resume,
 };
 
@@ -829,7 +830,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
 
 static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
 {
-       rq->timeout = SD_FLUSH_TIMEOUT;
+       rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
        rq->retries = SD_MAX_RETRIES;
        rq->cmd[0] = SYNCHRONIZE_CACHE;
        rq->cmd_len = 10;
@@ -1433,12 +1434,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 {
        int retries, res;
        struct scsi_device *sdp = sdkp->device;
+       const int timeout = sdp->request_queue->rq_timeout
+               * SD_FLUSH_TIMEOUT_MULTIPLIER;
        struct scsi_sense_hdr sshdr;
 
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
-
        for (retries = 3; retries > 0; --retries) {
                unsigned char cmd[10] = { 0 };
 
@@ -1448,20 +1450,39 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                 * flush everything.
                 */
                res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
-                                            &sshdr, SD_FLUSH_TIMEOUT,
-                                            SD_MAX_RETRIES, NULL, REQ_PM);
+                                            &sshdr, timeout, SD_MAX_RETRIES,
+                                            NULL, REQ_PM);
                if (res == 0)
                        break;
        }
 
        if (res) {
                sd_print_result(sdkp, res);
+
                if (driver_byte(res) & DRIVER_SENSE)
                        sd_print_sense_hdr(sdkp, &sshdr);
+               /* we need to evaluate the error return; some failures are benign */
+               if (scsi_sense_valid(&sshdr) &&
+                       /* 0x3a is medium not present */
+                       sshdr.asc == 0x3a)
+                               /* this is not an error here */
+                               return 0;
+
+               switch (host_byte(res)) {
+               /* ignore errors due to racing a disconnection */
+               case DID_BAD_TARGET:
+               case DID_NO_CONNECT:
+                       return 0;
+               /* signal the upper layer it might try again */
+               case DID_BUS_BUSY:
+               case DID_IMM_RETRY:
+               case DID_REQUEUE:
+               case DID_SOFT_ERROR:
+                       return -EBUSY;
+               default:
+                       return -EIO;
+               }
        }
-
-       if (res)
-               return -EIO;
        return 0;
 }
 
@@ -2639,13 +2660,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
        struct scsi_device *sdev = sdkp->device;
 
        if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
+               /* larger VPD buffer sizes might cause issues with arcmsr */
+               int vpd_buf_len = 64;
+
                sdev->no_report_opcodes = 1;
 
                /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
                 * CODES is unsupported and the device has an ATA
                 * Information VPD page (SAT).
                 */
-               if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE))
+               if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
                        sdev->no_write_same = 1;
        }
 
@@ -3058,9 +3082,17 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
                sd_print_result(sdkp, res);
                if (driver_byte(res) & DRIVER_SENSE)
                        sd_print_sense_hdr(sdkp, &sshdr);
+               if (scsi_sense_valid(&sshdr) &&
+                       /* 0x3a is medium not present */
+                       sshdr.asc == 0x3a)
+                       res = 0;
        }
 
-       return res;
+       /* SCSI error codes must not go to the generic layer */
+       if (res)
+               return -EIO;
+
+       return 0;
 }
 
 /*
@@ -3078,7 +3110,7 @@ static void sd_shutdown(struct device *dev)
        if (pm_runtime_suspended(dev))
                goto exit;
 
-       if (sdkp->WCE) {
+       if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
                sd_sync_cache(sdkp);
        }
@@ -3092,7 +3124,7 @@ exit:
        scsi_disk_put(sdkp);
 }
 
-static int sd_suspend(struct device *dev)
+static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 {
        struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
        int ret = 0;
@@ -3100,16 +3132,23 @@ static int sd_suspend(struct device *dev)
        if (!sdkp)
                return 0;       /* this can happen */
 
-       if (sdkp->WCE) {
+       if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
                ret = sd_sync_cache(sdkp);
-               if (ret)
+               if (ret) {
+                       /* ignore OFFLINE device */
+                       if (ret == -ENODEV)
+                               ret = 0;
                        goto done;
+               }
        }
 
        if (sdkp->device->manage_start_stop) {
                sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+               /* an error is not worth aborting a system sleep */
                ret = sd_start_stop_device(sdkp, 0);
+               if (ignore_stop_errors)
+                       ret = 0;
        }
 
 done:
@@ -3117,6 +3156,16 @@ done:
        return ret;
 }
 
+static int sd_suspend_system(struct device *dev)
+{
+       return sd_suspend_common(dev, true);
+}
+
+static int sd_suspend_runtime(struct device *dev)
+{
+       return sd_suspend_common(dev, false);
+}
+
 static int sd_resume(struct device *dev)
 {
        struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
index 7a049de220512777dd8d1cb43e665a9c351023d5..26895ff247c59eef7f74ab300763c1137adddfba 100644 (file)
  */
 #define SD_TIMEOUT             (30 * HZ)
 #define SD_MOD_TIMEOUT         (75 * HZ)
-#define SD_FLUSH_TIMEOUT       (60 * HZ)
+/*
+ * The flush timeout is the standard per-device timeout (user-modifiable via
+ * sysfs, initially set from SD_TIMEOUT) multiplied by this factor.
+ */
+#define SD_FLUSH_TIMEOUT_MULTIPLIER    2
 #define SD_WRITE_SAME_TIMEOUT  (120 * HZ)
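With the fixed SD_FLUSH_TIMEOUT gone, the SYNCHRONIZE CACHE timeout now scales with the per-device request timeout, which can be changed at run time through sysfs (typically /sys/block/<disk>/device/timeout, in seconds). A worked sketch of the derivation, assuming the default 30-second timeout (example_flush_timeout() is hypothetical; sd.c computes the same expression inline):

/* Hypothetical helper: with the default 30-second device timeout this yields
 * 60 seconds, and raising the device timeout raises the flush timeout too.
 */
static unsigned int example_flush_timeout(struct scsi_device *sdp)
{
	return sdp->request_queue->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
}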
 
 /*
index 13d81c5c4ebf83dfa8d844fa6b4f64caf6725df7..5d6ed6cf12cc8c2c984e2565b028a55bedb001d8 100644 (file)
@@ -69,6 +69,7 @@ enum iscsi_uevent_e {
        ISCSI_UEVENT_LOGIN_FLASHNODE    = UEVENT_BASE + 28,
        ISCSI_UEVENT_LOGOUT_FLASHNODE   = UEVENT_BASE + 29,
        ISCSI_UEVENT_LOGOUT_FLASHNODE_SID       = UEVENT_BASE + 30,
+       ISCSI_UEVENT_SET_CHAP           = UEVENT_BASE + 31,
 
        /* up events */
        ISCSI_KEVENT_RECV_PDU           = KEVENT_BASE + 1,
@@ -309,8 +310,16 @@ enum iscsi_param_type {
        ISCSI_HOST_PARAM,       /* iscsi_host_param */
        ISCSI_NET_PARAM,        /* iscsi_net_param */
        ISCSI_FLASHNODE_PARAM,  /* iscsi_flashnode_param */
+       ISCSI_CHAP_PARAM,       /* iscsi_chap_param */
 };
 
+/* structure for a minimal use case */
+struct iscsi_param_info {
+       uint32_t len;           /* Actual length of the param value */
+       uint16_t param;         /* iscsi param */
+       uint8_t value[0];       /* 'len' bytes of value follow */
+} __packed;
+
 struct iscsi_iface_param_info {
        uint32_t iface_num;     /* iface number, 0 - n */
        uint32_t len;           /* Actual length of the param */
@@ -739,6 +748,14 @@ enum chap_type_e {
        CHAP_TYPE_IN,
 };
 
+enum iscsi_chap_param {
+       ISCSI_CHAP_PARAM_INDEX,
+       ISCSI_CHAP_PARAM_CHAP_TYPE,
+       ISCSI_CHAP_PARAM_USERNAME,
+       ISCSI_CHAP_PARAM_PASSWORD,
+       ISCSI_CHAP_PARAM_PASSWORD_LEN
+};
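The iscsi_chap_param values enumerate the fields user space can update through ISCSI_UEVENT_SET_CHAP, and struct iscsi_param_info above is the minimal length/param/value triplet intended to carry each of them. A hedged sketch of laying out one such entry in a buffer, assuming a simple packed encoding (fill_chap_username() is hypothetical; the exact wire format used by the management tools is not part of this diff):

/* Hypothetical construction of a single CHAP parameter entry: a packed
 * struct iscsi_param_info header followed by 'len' value bytes.
 */
static int fill_chap_username(void *buf, size_t buflen, const char *name)
{
	struct iscsi_param_info *info = buf;
	size_t name_len = strlen(name);

	if (buflen < sizeof(*info) + name_len)
		return -EMSGSIZE;

	info->param = ISCSI_CHAP_PARAM_USERNAME;
	info->len = name_len;
	memcpy(info->value, name, name_len);

	return sizeof(*info) + name_len;
}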
+
 #define ISCSI_CHAP_AUTH_NAME_MAX_LEN   256
 #define ISCSI_CHAP_AUTH_SECRET_MAX_LEN 256
 struct iscsi_chap_rec {
index d0f1602985e7efe6d6ab40dd67fbcc04e6a32276..fe7c8f3e93f81dad733c6cc6a08c2d8a65c3a8df 100644 (file)
@@ -152,6 +152,7 @@ struct iscsi_transport {
        int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
                         uint32_t *num_entries, char *buf);
        int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+       int (*set_chap) (struct Scsi_Host *shost, void *data, int len);
        int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
                                    int param, char *buf);
        int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,