/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
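
/*
 * Worked example (illustrative, not part of the original source): a
 * request with 17 data segments fills the 3 DSDs in the Command Type 2
 * IOCB and then needs (17 - 3) / 7 = 2 Continuation Type 0 IOCBs with
 * no remainder, so qla2x00_calc_iocbs_32(17) returns 3 entries.
 */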

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
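
/*
 * Worked example (illustrative): 64-bit DSDs are larger, so only 2 fit
 * in the Command Type 3 IOCB and 5 in each Continuation Type 1 IOCB;
 * 17 data segments therefore need 1 + (17 - 2) / 5 = 4 entries, again
 * with no remainder.
 */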

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
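
/*
 * Reading of the mapping above (hedged summary): the firmware INSERTs
 * protection data on the side that arrives without it (READ_INSERT and
 * WRITE_INSERT), REMOVEs it where the other side does not want it back
 * (READ_STRIP and WRITE_STRIP), and for the PASS cases carries it
 * end to end, checking an IP checksum guard instead of the T10 CRC when
 * the host advertises SHOST_DIX_GUARD_IP.
 */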

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
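
/*
 * Layout note (illustrative): each 32-bit DSD is two words (address,
 * length), so a 10-segment command lands 3 DSDs in the Command Type 2
 * IOCB and the remaining 7 in a single Continuation Type 0 IOCB.
 */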

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
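
/*
 * Layout note (illustrative): a 64-bit DSD is three words (address low,
 * address high, length), which is why only 2 fit in the Command Type 3
 * IOCB and 5 in each Continuation Type 1 IOCB.
 */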

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
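
/*
 * Ring accounting example (illustrative): with a 128-entry ring,
 * ring_index == 120 and a firmware out-pointer of 8, the free space is
 * req->length - (ring_index - cnt) = 128 - 112 = 16 entries; the "+ 2"
 * in the comparison keeps a small margin so the ring never fills
 * completely.
 */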

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	vha = sp->vha;
	ha = vha->hw;
	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
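
/*
 * Worked example (illustrative, assuming QLA_DSDS_PER_IOCB is 37 as
 * defined in qla_def.h at the time of writing): 100 descriptors need
 * 100 / 37 = 2 full lists plus one more for the 26 left over, so this
 * returns 3.
 */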

/*
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
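
/*
 * Background note (standard T10 DIF, not specific to this driver): each
 * protection interval carries an 8-byte tuple of 2-byte guard CRC,
 * 2-byte application tag and 4-byte reference tag; the masks above
 * select which bytes of the app/ref tags the firmware actually
 * validates or replaces.
 */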

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
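
/*
 * Worked example (illustrative): carving 512-byte protection intervals
 * out of a 1280-byte scatter element yields two full blocks and then a
 * 256-byte partial; the partial is carried in tot_partial and completed
 * from the start of the next scatter element on a later call.
 */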

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int      = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg    = tc->sg;
		sg_prot	      = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
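
/*
 * DIF sizing example (illustrative): a 64 KiB transfer on 512-byte
 * sectors spans 128 protection intervals, so dif_bytes = 128 * 8 = 1024.
 * For the PASS and firmware INSERT/STRIP cases the wire count (fcp_dl)
 * becomes 65536 + 1024, while for host-side READ_INSERT/WRITE_STRIP the
 * wire count stays 65536 and only the local DMA byte count grows.
 */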

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
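
/*
 * Note (paraphrasing the shadow-register read above): on adapters where
 * IS_SHADOW_REG_CAPABLE() is true the firmware DMA-writes its request
 * queue out-pointer into host memory (*req->out_ptr), which is cheaper
 * to poll than the RD_REG_DWORD_RELAXED() MMIO read used otherwise.
 */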

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;
	struct qla_qpair	*qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}
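
/*
 * Typical usage (sketch, mirroring __qla2x00_marker() above): callers
 * hold the hardware lock, request a slot, and bail out on NULL:
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		return QLA_FUNCTION_FAILED;  // ring full or no free handle
 */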

static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
		logio->control_flags |= LCF_NVME_PRLI;

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);

	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	if (!sp->fcport->se_sess ||
	    !sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->vha->vp_idx;
}
2320 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2322 struct qla_hw_data *ha = sp->vha->hw;
2324 mbx->entry_type = MBX_IOCB_TYPE;
2325 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2326 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2327 if (HAS_EXTENDED_IDS(ha)) {
2328 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2329 mbx->mb10 = cpu_to_le16(BIT_0);
2331 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2333 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2334 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2335 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2336 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2337 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
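/**
 * qla24xx_tm_iocb() - Build a task-management IOCB.
 * @sp: SRB carrying the task-management request
 * @tsk: task-management IOCB entry to populate
 *
 * The IOCB timeout is set to twice the firmware's R_A_TOV; the LUN field
 * is only filled in for a LUN reset.
 */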
2341 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2345 struct fc_port *fcport = sp->fcport;
2346 scsi_qla_host_t *vha = fcport->vha;
2347 struct qla_hw_data *ha = vha->hw;
2348 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2349 struct req_que *req = vha->req;
2351 flags = iocb->u.tmf.flags;
2352 lun = iocb->u.tmf.lun;
2354 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2355 tsk->entry_count = 1;
2356 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2357 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2358 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2359 tsk->control_flags = cpu_to_le32(flags);
2360 tsk->port_id[0] = fcport->d_id.b.al_pa;
2361 tsk->port_id[1] = fcport->d_id.b.area;
2362 tsk->port_id[2] = fcport->d_id.b.domain;
2363 tsk->vp_index = fcport->vha->vp_idx;
2365 if (flags == TCF_LUN_RESET) {
2366 int_to_scsilun(lun, &tsk->lun);
2367 host_to_fcp_swap((uint8_t *)&tsk->lun,
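/**
 * qla2x00_els_dcmd_sp_free() - Release resources held by an ELS DCMD SRB.
 * @data: SRB to clean up
 *
 * Frees the DMA-coherent LOGO payload, if one was allocated, and deletes
 * the SRB timer.
 */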
2373 qla2x00_els_dcmd_sp_free(void *data)
2376 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2380 if (elsio->u.els_logo.els_logo_pyld)
2381 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2382 elsio->u.els_logo.els_logo_pyld,
2383 elsio->u.els_logo.els_logo_pyld_dma);
2385 del_timer(&elsio->timer);
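/**
 * qla2x00_els_dcmd_iocb_timeout() - Timeout handler for an ELS DCMD SRB.
 * @data: SRB that timed out
 *
 * Aborts the outstanding exchange and wakes up the thread waiting on the
 * LOGO completion.
 */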
2390 qla2x00_els_dcmd_iocb_timeout(void *data)
2393 fc_port_t *fcport = sp->fcport;
2394 struct scsi_qla_host *vha = sp->vha;
2395 struct qla_hw_data *ha = vha->hw;
2396 struct srb_iocb *lio = &sp->u.iocb_cmd;
2397 unsigned long flags = 0;
2399 ql_dbg(ql_dbg_io, vha, 0x3069,
2400 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2401 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2402 fcport->d_id.b.al_pa);
2404 /* Abort the exchange */
2405 spin_lock_irqsave(&ha->hardware_lock, flags);
2406 if (ha->isp_ops->abort_command(sp)) {
2407 ql_dbg(ql_dbg_io, vha, 0x3070,
2408 "mbx abort_command failed.\n");
2410 ql_dbg(ql_dbg_io, vha, 0x3071,
2411 "mbx abort_command success.\n");
2413 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2415 complete(&lio->u.els_logo.comp);
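/**
 * qla2x00_els_dcmd_sp_done() - Completion callback for an ELS DCMD SRB.
 * @ptr: completed SRB
 * @res: completion status
 */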
2419 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2422 fc_port_t *fcport = sp->fcport;
2423 struct srb_iocb *lio = &sp->u.iocb_cmd;
2424 struct scsi_qla_host *vha = sp->vha;
2426 ql_dbg(ql_dbg_io, vha, 0x3072,
2427 "%s hdl=%x, portid=%02x%02x%02x done\n",
2428 sp->name, sp->handle, fcport->d_id.b.domain,
2429 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2431 complete(&lio->u.els_logo.comp);
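/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS command (LOGO).
 * @vha: host adapter to send the ELS from
 * @els_opcode: ELS command code to send
 * @remote_did: port ID of the remote port
 *
 * Allocates a temporary fcport and SRB, builds the LOGO payload in a
 * DMA-coherent buffer, starts the SRB and blocks until it completes.
 */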
2435 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2436 port_id_t remote_did)
2439 fc_port_t *fcport = NULL;
2440 struct srb_iocb *elsio = NULL;
2441 struct qla_hw_data *ha = vha->hw;
2442 struct els_logo_payload logo_pyld;
2443 int rval = QLA_SUCCESS;
2445 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2447 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2451 /* Alloc SRB structure */
2452 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2455 ql_log(ql_log_info, vha, 0x70e6,
2456 "SRB allocation failed\n");
2460 elsio = &sp->u.iocb_cmd;
2461 fcport->loop_id = 0xFFFF;
2462 fcport->d_id.b.domain = remote_did.b.domain;
2463 fcport->d_id.b.area = remote_did.b.area;
2464 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2466 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2467 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2469 sp->type = SRB_ELS_DCMD;
2470 sp->name = "ELS_DCMD";
2471 sp->fcport = fcport;
2472 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2473 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2474 sp->done = qla2x00_els_dcmd_sp_done;
2475 sp->free = qla2x00_els_dcmd_sp_free;
2477 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2478 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2481 if (!elsio->u.els_logo.els_logo_pyld) {
2483 return QLA_FUNCTION_FAILED;
2486 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2488 elsio->u.els_logo.els_cmd = els_opcode;
2489 logo_pyld.opcode = els_opcode;
2490 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2491 logo_pyld.s_id[1] = vha->d_id.b.area;
2492 logo_pyld.s_id[2] = vha->d_id.b.domain;
2493 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2494 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2496 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2497 sizeof(struct els_logo_payload));
2499 rval = qla2x00_start_sp(sp);
2500 if (rval != QLA_SUCCESS) {
2502 return QLA_FUNCTION_FAILED;
2505 ql_dbg(ql_dbg_io, vha, 0x3074,
2506 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2507 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2508 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2510 wait_for_completion(&elsio->u.els_logo.comp);
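/**
 * qla24xx_els_logo_iocb() - Build the ELS IOCB for a driver-generated LOGO.
 * @sp: SRB carrying the ELS request
 * @els_iocb: ELS IOCB entry to populate
 *
 * A single transmit descriptor points at the LOGO payload; no receive
 * descriptors are used.
 */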
2517 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2519 scsi_qla_host_t *vha = sp->vha;
2520 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2522 els_iocb->entry_type = ELS_IOCB_TYPE;
2523 els_iocb->entry_count = 1;
2524 els_iocb->sys_define = 0;
2525 els_iocb->entry_status = 0;
2526 els_iocb->handle = sp->handle;
2527 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2528 els_iocb->tx_dsd_count = 1;
2529 els_iocb->vp_index = vha->vp_idx;
2530 els_iocb->sof_type = EST_SOFI3;
2531 els_iocb->rx_dsd_count = 0;
2532 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2534 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2535 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2536 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2537 els_iocb->control_flags = 0;
2539 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2540 els_iocb->tx_address[0] =
2541 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2542 els_iocb->tx_address[1] =
2543 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2544 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2546 els_iocb->rx_byte_count = 0;
2547 els_iocb->rx_address[0] = 0;
2548 els_iocb->rx_address[1] = 0;
2549 els_iocb->rx_len = 0;
2551 sp->vha->qla_stats.control_requests++;
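/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: SRB carrying the bsg_job
 * @els_iocb: ELS IOCB entry to populate
 *
 * The ELS opcode comes from the bsg request: els_code for an RPT ELS,
 * command_code for a host ELS.
 */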
2555 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2557 struct bsg_job *bsg_job = sp->u.bsg_job;
2558 struct fc_bsg_request *bsg_request = bsg_job->request;
2560 els_iocb->entry_type = ELS_IOCB_TYPE;
2561 els_iocb->entry_count = 1;
2562 els_iocb->sys_define = 0;
2563 els_iocb->entry_status = 0;
2564 els_iocb->handle = sp->handle;
2565 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2566 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2567 els_iocb->vp_index = sp->vha->vp_idx;
2568 els_iocb->sof_type = EST_SOFI3;
2569 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2572 sp->type == SRB_ELS_CMD_RPT ?
2573 bsg_request->rqst_data.r_els.els_code :
2574 bsg_request->rqst_data.h_els.command_code;
2575 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2576 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2577 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2578 els_iocb->control_flags = 0;
2579 els_iocb->rx_byte_count =
2580 cpu_to_le32(bsg_job->reply_payload.payload_len);
2581 els_iocb->tx_byte_count =
2582 cpu_to_le32(bsg_job->request_payload.payload_len);
2584 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2585 (bsg_job->request_payload.sg_list)));
2586 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2587 (bsg_job->request_payload.sg_list)));
2588 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2589 (bsg_job->request_payload.sg_list));
2591 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2592 (bsg_job->reply_payload.sg_list)));
2593 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2594 (bsg_job->reply_payload.sg_list)));
2595 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2596 (bsg_job->reply_payload.sg_list));
2598 sp->vha->qla_stats.control_requests++;
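/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB for pre-FWI2 ISPs.
 * @sp: SRB carrying the bsg_job
 * @ct_iocb: MS IOCB entry to populate
 *
 * The reply scatter/gather list is walked, spilling into Continuation
 * Type 1 IOCBs once the descriptors in the base packet are used up.
 */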
2602 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2604 uint16_t avail_dsds;
2606 struct scatterlist *sg;
2609 scsi_qla_host_t *vha = sp->vha;
2610 struct qla_hw_data *ha = vha->hw;
2611 struct bsg_job *bsg_job = sp->u.bsg_job;
2612 int loop_iteration = 0;
2613 int entry_count = 1;
2615 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2616 ct_iocb->entry_type = CT_IOCB_TYPE;
2617 ct_iocb->entry_status = 0;
2618 ct_iocb->handle1 = sp->handle;
2619 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2620 ct_iocb->status = cpu_to_le16(0);
2621 ct_iocb->control_flags = cpu_to_le16(0);
2622 ct_iocb->timeout = 0;
2623 ct_iocb->cmd_dsd_count =
2624 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2625 ct_iocb->total_dsd_count =
2626 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2627 ct_iocb->req_bytecount =
2628 cpu_to_le32(bsg_job->request_payload.payload_len);
2629 ct_iocb->rsp_bytecount =
2630 cpu_to_le32(bsg_job->reply_payload.payload_len);
2632 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2633 (bsg_job->request_payload.sg_list)));
2634 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2635 (bsg_job->request_payload.sg_list)));
2636 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2638 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2639 (bsg_job->reply_payload.sg_list)));
2640 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2641 (bsg_job->reply_payload.sg_list)));
2642 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2645 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2647 tot_dsds = bsg_job->reply_payload.sg_cnt;
2649 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2651 cont_a64_entry_t *cont_pkt;
2653 /* Allocate additional continuation packets? */
2654 if (avail_dsds == 0) {
2656 * Five DSDs are available in the Cont.
2659 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2660 vha->hw->req_q_map[0]);
2661 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2666 sle_dma = sg_dma_address(sg);
2667 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2668 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2669 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2673 ct_iocb->entry_count = entry_count;
2675 sp->vha->qla_stats.control_requests++;
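/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB for FWI2-capable ISPs.
 * @sp: SRB carrying the bsg_job
 * @ct_iocb: CT IOCB entry to populate
 */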
2679 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2681 uint16_t avail_dsds;
2683 struct scatterlist *sg;
2686 scsi_qla_host_t *vha = sp->vha;
2687 struct qla_hw_data *ha = vha->hw;
2688 struct bsg_job *bsg_job = sp->u.bsg_job;
2689 int loop_iteration = 0;
2690 int entry_count = 1;
2692 ct_iocb->entry_type = CT_IOCB_TYPE;
2693 ct_iocb->entry_status = 0;
2694 ct_iocb->sys_define = 0;
2695 ct_iocb->handle = sp->handle;
2697 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2698 ct_iocb->vp_index = sp->vha->vp_idx;
2699 ct_iocb->comp_status = cpu_to_le16(0);
2701 ct_iocb->cmd_dsd_count =
2702 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2703 ct_iocb->timeout = 0;
2704 ct_iocb->rsp_dsd_count =
2705 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2706 ct_iocb->rsp_byte_count =
2707 cpu_to_le32(bsg_job->reply_payload.payload_len);
2708 ct_iocb->cmd_byte_count =
2709 cpu_to_le32(bsg_job->request_payload.payload_len);
2710 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2711 (bsg_job->request_payload.sg_list)));
2712 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2713 (bsg_job->request_payload.sg_list)));
2714 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2715 (bsg_job->request_payload.sg_list));
2718 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2720 tot_dsds = bsg_job->reply_payload.sg_cnt;
2722 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2724 cont_a64_entry_t *cont_pkt;
2726 /* Allocate additional continuation packets? */
2727 if (avail_dsds == 0) {
2729 * Five DSDs are available in the Cont.
2732 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2734 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2739 sle_dma = sg_dma_address(sg);
2740 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2741 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2742 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2746 ct_iocb->entry_count = entry_count;
2750 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2751 * @sp: command to send to the ISP
2753 * Returns non-zero if a failure occurred, else zero.
2756 qla82xx_start_scsi(srb_t *sp)
2759 unsigned long flags;
2760 struct scsi_cmnd *cmd;
2767 struct device_reg_82xx __iomem *reg;
2770 uint8_t additional_cdb_len;
2771 struct ct6_dsd *ctx;
2772 struct scsi_qla_host *vha = sp->vha;
2773 struct qla_hw_data *ha = vha->hw;
2774 struct req_que *req = NULL;
2775 struct rsp_que *rsp = NULL;
2777 /* Setup device pointers. */
2778 reg = &ha->iobase->isp82;
2779 cmd = GET_CMD_SP(sp);
2781 rsp = ha->rsp_q_map[0];
2783 /* So we know we haven't pci_map'ed anything yet */
2786 dbval = 0x04 | (ha->portnum << 5);
2788 /* Send marker if required */
2789 if (vha->marker_needed != 0) {
2790 if (qla2x00_marker(vha, req,
2791 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2792 ql_log(ql_log_warn, vha, 0x300c,
2793 "qla2x00_marker failed for cmd=%p.\n", cmd);
2794 return QLA_FUNCTION_FAILED;
2796 vha->marker_needed = 0;
2799 /* Acquire ring specific lock */
2800 spin_lock_irqsave(&ha->hardware_lock, flags);
2802 /* Check for room in outstanding command list. */
2803 handle = req->current_outstanding_cmd;
2804 for (index = 1; index < req->num_outstanding_cmds; index++) {
2806 if (handle == req->num_outstanding_cmds)
2808 if (!req->outstanding_cmds[handle])
2811 if (index == req->num_outstanding_cmds)
2814 /* Map the sg table so we have an accurate count of sg entries needed */
2815 if (scsi_sg_count(cmd)) {
2816 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2817 scsi_sg_count(cmd), cmd->sc_data_direction);
2818 if (unlikely(!nseg))
2825 if (tot_dsds > ql2xshiftctondsd) {
2826 struct cmd_type_6 *cmd_pkt;
2827 uint16_t more_dsd_lists = 0;
2828 struct dsd_dma *dsd_ptr;
2831 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2832 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2833 ql_dbg(ql_dbg_io, vha, 0x300d,
2834 "Num of DSD list %d is than %d for cmd=%p.\n",
2835 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2840 if (more_dsd_lists <= ha->gbl_dsd_avail)
2841 goto sufficient_dsds;
2843 more_dsd_lists -= ha->gbl_dsd_avail;
2845 for (i = 0; i < more_dsd_lists; i++) {
2846 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2848 ql_log(ql_log_fatal, vha, 0x300e,
2849 "Failed to allocate memory for dsd_dma "
2850 "for cmd=%p.\n", cmd);
2854 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2855 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2856 if (!dsd_ptr->dsd_addr) {
2858 ql_log(ql_log_fatal, vha, 0x300f,
2859 "Failed to allocate memory for dsd_addr "
2860 "for cmd=%p.\n", cmd);
2863 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2864 ha->gbl_dsd_avail++;
2870 if (req->cnt < (req_cnt + 2)) {
2871 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2872 &reg->req_q_out[0]);
2873 if (req->ring_index < cnt)
2874 req->cnt = cnt - req->ring_index;
2876 req->cnt = req->length -
2877 (req->ring_index - cnt);
2878 if (req->cnt < (req_cnt + 2))
2882 ctx = sp->u.scmd.ctx =
2883 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2885 ql_log(ql_log_fatal, vha, 0x3010,
2886 "Failed to allocate ctx for cmd=%p.\n", cmd);
2890 memset(ctx, 0, sizeof(struct ct6_dsd));
2891 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2892 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2893 if (!ctx->fcp_cmnd) {
2894 ql_log(ql_log_fatal, vha, 0x3011,
2895 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2899 /* Initialize the DSD list and dma handle */
2900 INIT_LIST_HEAD(&ctx->dsd_list);
2901 ctx->dsd_use_cnt = 0;
2903 if (cmd->cmd_len > 16) {
2904 additional_cdb_len = cmd->cmd_len - 16;
2905 if ((cmd->cmd_len % 4) != 0) {
2906 /* SCSI command bigger than 16 bytes must be
2909 ql_log(ql_log_warn, vha, 0x3012,
2910 "scsi cmd len %d not multiple of 4 "
2911 "for cmd=%p.\n", cmd->cmd_len, cmd);
2912 goto queuing_error_fcp_cmnd;
2914 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2916 additional_cdb_len = 0;
2917 ctx->fcp_cmnd_len = 12 + 16 + 4;
2920 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2921 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2923 /* Zero out remaining portion of packet. */
2924 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2925 clr_ptr = (uint32_t *)cmd_pkt + 2;
2926 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2927 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2929 /* Set NPORT-ID and LUN number */
2930 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2931 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2932 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2933 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2934 cmd_pkt->vp_index = sp->vha->vp_idx;
2936 /* Build IOCB segments */
2937 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2938 goto queuing_error_fcp_cmnd;
2940 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2941 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2943 /* build FCP_CMND IU */
2944 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2945 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2946 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2948 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2949 ctx->fcp_cmnd->additional_cdb_len |= 1;
2950 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2951 ctx->fcp_cmnd->additional_cdb_len |= 2;
2953 /* Populate the FCP_PRIO. */
2954 if (ha->flags.fcp_prio_enabled)
2955 ctx->fcp_cmnd->task_attribute |=
2956 sp->fcport->fcp_prio << 3;
2958 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2960 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2961 additional_cdb_len);
2962 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2964 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2965 cmd_pkt->fcp_cmnd_dseg_address[0] =
2966 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2967 cmd_pkt->fcp_cmnd_dseg_address[1] =
2968 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2970 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2971 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2972 /* Set total data segment count. */
2973 cmd_pkt->entry_count = (uint8_t)req_cnt;
2974 /* Specify response queue number where
2975 * completion should happen
2977 cmd_pkt->entry_status = (uint8_t) rsp->id;
2979 struct cmd_type_7 *cmd_pkt;
2980 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2981 if (req->cnt < (req_cnt + 2)) {
2982 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2983 &reg->req_q_out[0]);
2984 if (req->ring_index < cnt)
2985 req->cnt = cnt - req->ring_index;
2987 req->cnt = req->length -
2988 (req->ring_index - cnt);
2990 if (req->cnt < (req_cnt + 2))
2993 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2994 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2996 /* Zero out remaining portion of packet. */
2997 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2998 clr_ptr = (uint32_t *)cmd_pkt + 2;
2999 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3000 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3002 /* Set NPORT-ID and LUN number */
3003 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3004 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3005 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3006 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3007 cmd_pkt->vp_index = sp->vha->vp_idx;
3009 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3010 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3011 sizeof(cmd_pkt->lun));
3013 /* Populate the FCP_PRIO. */
3014 if (ha->flags.fcp_prio_enabled)
3015 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3017 /* Load SCSI command packet. */
3018 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3019 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3021 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3023 /* Build IOCB segments */
3024 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3026 /* Set total data segment count. */
3027 cmd_pkt->entry_count = (uint8_t)req_cnt;
3028 /* Specify response queue number where
3029 * completion should happen.
3031 cmd_pkt->entry_status = (uint8_t) rsp->id;
3034 /* Build command packet. */
3035 req->current_outstanding_cmd = handle;
3036 req->outstanding_cmds[handle] = sp;
3037 sp->handle = handle;
3038 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3039 req->cnt -= req_cnt;
3042 /* Adjust ring index. */
3044 if (req->ring_index == req->length) {
3045 req->ring_index = 0;
3046 req->ring_ptr = req->ring;
3050 sp->flags |= SRB_DMA_VALID;
3052 /* Set chip new ring index. */
3053 /* write, read and verify logic */
3054 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3056 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3058 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3060 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3061 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3066 /* Manage unprocessed RIO/ZIO commands in response queue. */
3067 if (vha->flags.process_response_queue &&
3068 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3069 qla24xx_process_response_queue(vha, rsp);
3071 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3074 queuing_error_fcp_cmnd:
3075 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3078 scsi_dma_unmap(cmd);
3080 if (sp->u.scmd.ctx) {
3081 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3082 sp->u.scmd.ctx = NULL;
3084 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3086 return QLA_FUNCTION_FAILED;
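/**
 * qla24xx_abort_iocb() - Build an abort IOCB for an outstanding command.
 * @sp: SRB carrying the abort request
 * @abt_iocb: abort IOCB entry to populate
 *
 * handle_to_abort identifies the outstanding command to be aborted.
 */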
3090 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3092 struct srb_iocb *aio = &sp->u.iocb_cmd;
3093 scsi_qla_host_t *vha = sp->vha;
3094 struct req_que *req = vha->req;
3096 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3097 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3098 abt_iocb->entry_count = 1;
3099 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3100 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3101 abt_iocb->handle_to_abort =
3102 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3103 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3104 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3105 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3106 abt_iocb->vp_index = vha->vp_idx;
3107 abt_iocb->req_que_no = cpu_to_le16(req->id);
3108 /* Send the command to the firmware */
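/**
 * qla2x00_mb_iocb() - Load outbound mailbox registers into a mailbox IOCB.
 * @sp: SRB carrying the mailbox values
 * @mbx: ISP24xx mailbox IOCB entry to populate
 */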
3113 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3117 mbx->entry_type = MBX_IOCB_TYPE;
3118 mbx->handle = sp->handle;
3119 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3121 for (i = 0; i < sz; i++)
3122 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
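/**
 * qla2x00_ctpthru_cmd_iocb() - Prepare a CT pass-through command IOCB.
 * @sp: SRB carrying the CT pass-through arguments
 * @ct_pkt: CT IOCB entry to populate
 */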
3126 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3128 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3129 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3130 ct_pkt->handle = sp->handle;
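/**
 * qla2x00_send_notify_ack_iocb() - Build a notify-acknowledge IOCB.
 * @sp: SRB carrying the immediate notify to acknowledge
 * @nack: notify-ack IOCB entry to populate
 *
 * Echoes the exchange and SRR fields of the saved immediate-notify entry.
 */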
3133 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3134 struct nack_to_isp *nack)
3136 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3138 nack->entry_type = NOTIFY_ACK_TYPE;
3139 nack->entry_count = 1;
3140 nack->ox_id = ntfy->ox_id;
3142 nack->u.isp24.handle = sp->handle;
3143 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3144 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3145 nack->u.isp24.flags = ntfy->u.isp24.flags &
3146 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3148 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3149 nack->u.isp24.status = ntfy->u.isp24.status;
3150 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3151 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3152 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3153 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3154 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3155 nack->u.isp24.srr_flags = 0;
3156 nack->u.isp24.srr_reject_code = 0;
3157 nack->u.isp24.srr_reject_code_expl = 0;
3158 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3162 * Build NVME LS request
3165 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3167 struct srb_iocb *nvme;
3168 int rval = QLA_SUCCESS;
3170 nvme = &sp->u.iocb_cmd;
3171 cmd_pkt->entry_type = PT_LS4_REQUEST;
3172 cmd_pkt->entry_count = 1;
3173 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3175 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3176 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3177 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3179 cmd_pkt->tx_dseg_count = 1;
3180 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3181 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3182 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3183 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3185 cmd_pkt->rx_dseg_count = 1;
3186 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3187 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3188 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3189 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
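/**
 * qla2x00_start_sp() - Build and queue the IOCB for an SRB.
 * @sp: SRB to send to the firmware
 *
 * Allocates IOCB space under the hardware lock, dispatches to the builder
 * matching sp->type and rings the request queue.
 *
 * Returns QLA_FUNCTION_FAILED if no IOCB space could be allocated.
 */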
3195 qla2x00_start_sp(srb_t *sp)
3198 scsi_qla_host_t *vha = sp->vha;
3199 struct qla_hw_data *ha = vha->hw;
3201 unsigned long flags;
3203 rval = QLA_FUNCTION_FAILED;
3204 spin_lock_irqsave(&ha->hardware_lock, flags);
3205 pkt = qla2x00_alloc_iocbs(vha, sp);
3207 ql_log(ql_log_warn, vha, 0x700c,
3208 "qla2x00_alloc_iocbs failed.\n");
3215 IS_FWI2_CAPABLE(ha) ?
3216 qla24xx_login_iocb(sp, pkt) :
3217 qla2x00_login_iocb(sp, pkt);
3220 qla24xx_prli_iocb(sp, pkt);
3222 case SRB_LOGOUT_CMD:
3223 IS_FWI2_CAPABLE(ha) ?
3224 qla24xx_logout_iocb(sp, pkt) :
3225 qla2x00_logout_iocb(sp, pkt);
3227 case SRB_ELS_CMD_RPT:
3228 case SRB_ELS_CMD_HST:
3229 qla24xx_els_iocb(sp, pkt);
3232 IS_FWI2_CAPABLE(ha) ?
3233 qla24xx_ct_iocb(sp, pkt) :
3234 qla2x00_ct_iocb(sp, pkt);
3237 IS_FWI2_CAPABLE(ha) ?
3238 qla24xx_adisc_iocb(sp, pkt) :
3239 qla2x00_adisc_iocb(sp, pkt);
3243 qlafx00_tm_iocb(sp, pkt) :
3244 qla24xx_tm_iocb(sp, pkt);
3246 case SRB_FXIOCB_DCMD:
3247 case SRB_FXIOCB_BCMD:
3248 qlafx00_fxdisc_iocb(sp, pkt);
3251 qla_nvme_ls(sp, pkt);
3255 qlafx00_abort_iocb(sp, pkt) :
3256 qla24xx_abort_iocb(sp, pkt);
3259 qla24xx_els_logo_iocb(sp, pkt);
3261 case SRB_CT_PTHRU_CMD:
3262 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3265 qla2x00_mb_iocb(sp, pkt);
3267 case SRB_NACK_PLOGI:
3270 qla2x00_send_notify_ack_iocb(sp, pkt);
3277 qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3279 spin_unlock_irqrestore(&ha->hardware_lock, flags);
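/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: SRB carrying the bsg_job
 * @vha: host adapter the command is issued on
 * @cmd_pkt: bidirectional command IOCB to populate
 * @tot_dsds: total number of data segment descriptors
 */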
3284 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3285 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3287 uint16_t avail_dsds;
3289 uint32_t req_data_len = 0;
3290 uint32_t rsp_data_len = 0;
3291 struct scatterlist *sg;
3293 int entry_count = 1;
3294 struct bsg_job *bsg_job = sp->u.bsg_job;
3296 /* Update entry type to indicate bidir command */
3297 *((uint32_t *)(&cmd_pkt->entry_type)) =
3298 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3300 /* Set the transfer direction; in this case set both flags.
3301 * Also set the BD_WRAP_BACK flag; the firmware will take care of
3302 * assigning DID=SID for outgoing packets.
3304 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3305 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3306 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3309 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3310 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3311 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3312 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3314 vha->bidi_stats.transfer_bytes += req_data_len;
3315 vha->bidi_stats.io_count++;
3317 vha->qla_stats.output_bytes += req_data_len;
3318 vha->qla_stats.output_requests++;
3320 /* Only one DSD is available in the bidirectional IOCB; the remaining
3321 * DSDs are bundled in continuation IOCBs.
3324 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3328 for_each_sg(bsg_job->request_payload.sg_list, sg,
3329 bsg_job->request_payload.sg_cnt, index) {
3331 cont_a64_entry_t *cont_pkt;
3333 /* Allocate additional continuation packets */
3334 if (avail_dsds == 0) {
3335 /* Continuation type 1 IOCB can accommodate
3338 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3339 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3343 sle_dma = sg_dma_address(sg);
3344 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3345 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3346 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3349 /* For a read request the DSDs always go to a continuation IOCB
3350 * and follow the write DSDs. If there is room on the current IOCB
3351 * then they are added to that IOCB; otherwise a new continuation IOCB is
3354 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3355 bsg_job->reply_payload.sg_cnt, index) {
3357 cont_a64_entry_t *cont_pkt;
3359 /* Allocate additional continuation packets */
3360 if (avail_dsds == 0) {
3361 /* Continuation type 1 IOCB can accommodate
3364 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3365 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3369 sle_dma = sg_dma_address(sg);
3370 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3371 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3372 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3375 /* This value should be the same as the number of IOCBs required for this cmd */
3376 cmd_pkt->entry_count = entry_count;
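/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: SRB carrying the bsg_job
 * @vha: host adapter to send the command on
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY if no handle or ring
 * slot is available, or EXT_STATUS_MAILBOX if the marker IOCB fails.
 */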
3380 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3383 struct qla_hw_data *ha = vha->hw;
3384 unsigned long flags;
3390 struct cmd_bidir *cmd_pkt = NULL;
3391 struct rsp_que *rsp;
3392 struct req_que *req;
3393 int rval = EXT_STATUS_OK;
3397 rsp = ha->rsp_q_map[0];
3400 /* Send marker if required */
3401 if (vha->marker_needed != 0) {
3402 if (qla2x00_marker(vha, req,
3403 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3404 return EXT_STATUS_MAILBOX;
3405 vha->marker_needed = 0;
3408 /* Acquire ring specific lock */
3409 spin_lock_irqsave(&ha->hardware_lock, flags);
3411 /* Check for room in outstanding command list. */
3412 handle = req->current_outstanding_cmd;
3413 for (index = 1; index < req->num_outstanding_cmds; index++) {
3415 if (handle == req->num_outstanding_cmds)
3417 if (!req->outstanding_cmds[handle])
3421 if (index == req->num_outstanding_cmds) {
3422 rval = EXT_STATUS_BUSY;
3426 /* Calculate number of IOCB required */
3427 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3429 /* Check for room on request queue. */
3430 if (req->cnt < req_cnt + 2) {
3431 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3432 RD_REG_DWORD_RELAXED(req->req_q_out);
3433 if (req->ring_index < cnt)
3434 req->cnt = cnt - req->ring_index;
3436 req->cnt = req->length -
3437 (req->ring_index - cnt);
3439 if (req->cnt < req_cnt + 2) {
3440 rval = EXT_STATUS_BUSY;
3444 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3445 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3447 /* Zero out remaining portion of packet. */
3448 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3449 clr_ptr = (uint32_t *)cmd_pkt + 2;
3450 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3452 /* Set NPORT-ID (of vha) */
3453 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3454 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3455 cmd_pkt->port_id[1] = vha->d_id.b.area;
3456 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3458 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3459 cmd_pkt->entry_status = (uint8_t) rsp->id;
3460 /* Build command packet. */
3461 req->current_outstanding_cmd = handle;
3462 req->outstanding_cmds[handle] = sp;
3463 sp->handle = handle;
3464 req->cnt -= req_cnt;
3466 /* Send the command to the firmware */
3468 qla2x00_start_iocbs(vha, req);
3470 spin_unlock_irqrestore(&ha->hardware_lock, flags);