1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12
13 #include <scsi/scsi_tcq.h>
14
15 /**
16  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17  * @sp: SCSI Request Block
18  *
19  * Returns the proper CF_* direction based on the command's data direction.
20  */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24         uint16_t cflags;
25         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26         struct scsi_qla_host *vha = sp->vha;
27
28         cflags = 0;
29
30         /* Set transfer direction */
31         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32                 cflags = CF_WRITE;
33                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34                 vha->qla_stats.output_requests++;
35         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36                 cflags = CF_READ;
37                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38                 vha->qla_stats.input_requests++;
39         }
40         return (cflags);
41 }
42
43 /**
44  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45  * Continuation Type 0 IOCBs to allocate.
46  *
47  * @dsds: number of data segment descriptors needed
48  *
49  * Returns the number of IOCB entries needed to store @dsds.
50  */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
53 {
54         uint16_t iocbs;
55
56         iocbs = 1;
57         if (dsds > 3) {
58                 iocbs += (dsds - 3) / 7;
59                 if ((dsds - 3) % 7)
60                         iocbs++;
61         }
62         return (iocbs);
63 }
64
65 /**
66  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67  * Continuation Type 1 IOCBs to allocate.
68  *
69  * @dsds: number of data segment descriptors needed
70  *
71  * Returns the number of IOCB entries needed to store @dsds.
72  */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
75 {
76         uint16_t iocbs;
77
78         iocbs = 1;
79         if (dsds > 2) {
80                 iocbs += (dsds - 2) / 5;
81                 if ((dsds - 2) % 5)
82                         iocbs++;
83         }
84         return (iocbs);
85 }
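
/*
 * Worked example (illustrative only): a command that needs 17 data
 * segment descriptors requires, with the 32-bit layout above,
 * 1 + ceil((17 - 3) / 7) = 3 IOCB entries, and with the 64-bit
 * layout, 1 + ceil((17 - 2) / 5) = 4 entries.
 */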
86
87 /**
88  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89  * @vha: HA context
90  *
91  * Returns a pointer to the Continuation Type 0 IOCB packet.
92  */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 {
96         cont_entry_t *cont_pkt;
97         struct req_que *req = vha->req;
98         /* Adjust ring index. */
99         req->ring_index++;
100         if (req->ring_index == req->length) {
101                 req->ring_index = 0;
102                 req->ring_ptr = req->ring;
103         } else {
104                 req->ring_ptr++;
105         }
106
107         cont_pkt = (cont_entry_t *)req->ring_ptr;
108
109         /* Load packet defaults. */
110         *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
111
112         return (cont_pkt);
113 }
114
115 /**
116  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117  * @vha: HA context
118  *
119  * Returns a pointer to the Continuation Type 1 IOCB packet.
120  */
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124         cont_a64_entry_t *cont_pkt;
125
126         /* Adjust ring index. */
127         req->ring_index++;
128         if (req->ring_index == req->length) {
129                 req->ring_index = 0;
130                 req->ring_ptr = req->ring;
131         } else {
132                 req->ring_ptr++;
133         }
134
135         cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136
137         /* Load packet defaults. */
138         *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
139             cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
140             cpu_to_le32(CONTINUE_A64_TYPE);
141
142         return (cont_pkt);
143 }
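
/*
 * Both continuation helpers above advance the request ring with the
 * same wrap-around pattern. A minimal sketch of that pattern on a
 * hypothetical ring type (not driver code):
 *
 *	struct ring { void *base, *ptr; uint32_t index, length; };
 *
 *	static void ring_advance(struct ring *r, size_t entry_size)
 *	{
 *		if (++r->index == r->length) {	/* wrapped: back to start */
 *			r->index = 0;
 *			r->ptr = r->base;
 *		} else {			/* next entry slot */
 *			r->ptr = (char *)r->ptr + entry_size;
 *		}
 *	}
 */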
144
145 inline int
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 {
148         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149         uint8_t guard = scsi_host_get_guard(cmd->device->host);
150
151         /* We always use DIF bundling for best performance */
152         *fw_prot_opts = 0;
153
154         /* Translate SCSI opcode to a protection opcode */
155         switch (scsi_get_prot_op(cmd)) {
156         case SCSI_PROT_READ_STRIP:
157                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
158                 break;
159         case SCSI_PROT_WRITE_INSERT:
160                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
161                 break;
162         case SCSI_PROT_READ_INSERT:
163                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164                 break;
165         case SCSI_PROT_WRITE_STRIP:
166                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
167                 break;
168         case SCSI_PROT_READ_PASS:
169         case SCSI_PROT_WRITE_PASS:
170                 if (guard & SHOST_DIX_GUARD_IP)
171                         *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172                 else
173                         *fw_prot_opts |= PO_MODE_DIF_PASS;
174                 break;
175         default:        /* Normal Request */
176                 *fw_prot_opts |= PO_MODE_DIF_PASS;
177                 break;
178         }
179
180         return scsi_prot_sg_count(cmd);
181 }
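
/*
 * Summary of the translation above: STRIP operations map to
 * PO_MODE_DIF_REMOVE, INSERT operations to PO_MODE_DIF_INSERT, and
 * PASS operations to PO_MODE_DIF_TCP_CKSUM when the host advertises
 * an IP-checksum guard (SHOST_DIX_GUARD_IP), otherwise to
 * PO_MODE_DIF_PASS, which is also the default for normal requests.
 */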
182
183 /**
184  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185  * capable IOCB types.
186  *
187  * @sp: SRB command to process
188  * @cmd_pkt: Command type 2 IOCB
189  * @tot_dsds: Total number of segments to transfer
190  */
191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192     uint16_t tot_dsds)
193 {
194         uint16_t        avail_dsds;
195         uint32_t        *cur_dsd;
196         scsi_qla_host_t *vha;
197         struct scsi_cmnd *cmd;
198         struct scatterlist *sg;
199         int i;
200
201         cmd = GET_CMD_SP(sp);
202
203         /* Update entry type to indicate Command Type 2 IOCB */
204         *((uint32_t *)(&cmd_pkt->entry_type)) =
205             cpu_to_le32(COMMAND_TYPE);
206
207         /* No data transfer */
208         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
209                 cmd_pkt->byte_count = cpu_to_le32(0);
210                 return;
211         }
212
213         vha = sp->vha;
214         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
215
216         /* Three DSDs are available in the Command Type 2 IOCB */
217         avail_dsds = 3;
218         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
219
220         /* Load data segments */
221         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
222                 cont_entry_t *cont_pkt;
223
224                 /* Allocate additional continuation packets? */
225                 if (avail_dsds == 0) {
226                         /*
227                          * Seven DSDs are available in the Continuation
228                          * Type 0 IOCB.
229                          */
230                         cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
231                         cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
232                         avail_dsds = 7;
233                 }
234
235                 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
236                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
237                 avail_dsds--;
238         }
239 }
240
241 /**
242  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243  * capable IOCB types.
244  *
245  * @sp: SRB command to process
246  * @cmd_pkt: Command type 3 IOCB
247  * @tot_dsds: Total number of segments to transfer
248  */
249 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250     uint16_t tot_dsds)
251 {
252         uint16_t        avail_dsds;
253         uint32_t        *cur_dsd;
254         scsi_qla_host_t *vha;
255         struct scsi_cmnd *cmd;
256         struct scatterlist *sg;
257         int i;
258
259         cmd = GET_CMD_SP(sp);
260
261         /* Update entry type to indicate Command Type 3 IOCB */
262         *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
263
264         /* No data transfer */
265         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
266                 cmd_pkt->byte_count = cpu_to_le32(0);
267                 return;
268         }
269
270         vha = sp->vha;
271         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
272
273         /* Two DSDs are available in the Command Type 3 IOCB */
274         avail_dsds = 2;
275         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
276
277         /* Load data segments */
278         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279                 dma_addr_t      sle_dma;
280                 cont_a64_entry_t *cont_pkt;
281
282                 /* Allocate additional continuation packets? */
283                 if (avail_dsds == 0) {
284                         /*
285                          * Five DSDs are available in the Continuation
286                          * Type 1 IOCB.
287                          */
288                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
289                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290                         avail_dsds = 5;
291                 }
292
293                 sle_dma = sg_dma_address(sg);
294                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
295                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
296                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
297                 avail_dsds--;
298         }
299 }
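
/*
 * Note the difference in descriptor width between the two builders
 * above: a 32-bit DSD is two dwords (address, length), while a
 * 64-bit DSD is three dwords (address low, address high, length),
 * which is why the Command Type 2 IOCB fits three DSDs but the
 * Command Type 3 IOCB only two.
 */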
300
301 /**
302  * qla2x00_start_scsi() - Send a SCSI command to the ISP
303  * @sp: command to send to the ISP
304  *
305  * Returns non-zero if a failure occurred, else zero.
306  */
307 int
308 qla2x00_start_scsi(srb_t *sp)
309 {
310         int             nseg;
311         unsigned long   flags;
312         scsi_qla_host_t *vha;
313         struct scsi_cmnd *cmd;
314         uint32_t        *clr_ptr;
315         uint32_t        index;
316         uint32_t        handle;
317         cmd_entry_t     *cmd_pkt;
318         uint16_t        cnt;
319         uint16_t        req_cnt;
320         uint16_t        tot_dsds;
321         struct device_reg_2xxx __iomem *reg;
322         struct qla_hw_data *ha;
323         struct req_que *req;
324         struct rsp_que *rsp;
325
326         /* Setup device pointers. */
327         vha = sp->vha;
328         ha = vha->hw;
329         reg = &ha->iobase->isp;
330         cmd = GET_CMD_SP(sp);
331         req = ha->req_q_map[0];
332         rsp = ha->rsp_q_map[0];
333         /* So we know we haven't pci_map'ed anything yet */
334         tot_dsds = 0;
335
336         /* Send marker if required */
337         if (vha->marker_needed != 0) {
338                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
339                     QLA_SUCCESS) {
340                         return (QLA_FUNCTION_FAILED);
341                 }
342                 vha->marker_needed = 0;
343         }
344
345         /* Acquire ring specific lock */
346         spin_lock_irqsave(&ha->hardware_lock, flags);
347
348         /* Check for room in outstanding command list. */
349         handle = req->current_outstanding_cmd;
350         for (index = 1; index < req->num_outstanding_cmds; index++) {
351                 handle++;
352                 if (handle == req->num_outstanding_cmds)
353                         handle = 1;
354                 if (!req->outstanding_cmds[handle])
355                         break;
356         }
357         if (index == req->num_outstanding_cmds)
358                 goto queuing_error;
359
360         /* Map the sg table so we have an accurate count of sg entries needed */
361         if (scsi_sg_count(cmd)) {
362                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
363                     scsi_sg_count(cmd), cmd->sc_data_direction);
364                 if (unlikely(!nseg))
365                         goto queuing_error;
366         } else
367                 nseg = 0;
368
369         tot_dsds = nseg;
370
371         /* Calculate the number of request entries needed. */
372         req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
373         if (req->cnt < (req_cnt + 2)) {
374                 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
375                 if (req->ring_index < cnt)
376                         req->cnt = cnt - req->ring_index;
377                 else
378                         req->cnt = req->length -
379                             (req->ring_index - cnt);
380                 /* If still no head room then bail out */
381                 if (req->cnt < (req_cnt + 2))
382                         goto queuing_error;
383         }
384
385         /* Build command packet */
386         req->current_outstanding_cmd = handle;
387         req->outstanding_cmds[handle] = sp;
388         sp->handle = handle;
389         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
390         req->cnt -= req_cnt;
391
392         cmd_pkt = (cmd_entry_t *)req->ring_ptr;
393         cmd_pkt->handle = handle;
394         /* Zero out remaining portion of packet. */
395         clr_ptr = (uint32_t *)cmd_pkt + 2;
396         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
397         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
398
399         /* Set target ID and LUN number*/
400         SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
401         cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
402         cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
403
404         /* Load SCSI command packet. */
405         memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
406         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
407
408         /* Build IOCB segments */
409         ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
410
411         /* Set total data segment count. */
412         cmd_pkt->entry_count = (uint8_t)req_cnt;
413         wmb();
414
415         /* Adjust ring index. */
416         req->ring_index++;
417         if (req->ring_index == req->length) {
418                 req->ring_index = 0;
419                 req->ring_ptr = req->ring;
420         } else
421                 req->ring_ptr++;
422
423         sp->flags |= SRB_DMA_VALID;
424
425         /* Set chip new ring index. */
426         WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
427         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */
428
429         /* Manage unprocessed RIO/ZIO commands in response queue. */
430         if (vha->flags.process_response_queue &&
431             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
432                 qla2x00_process_response_queue(rsp);
433
434         spin_unlock_irqrestore(&ha->hardware_lock, flags);
435         return (QLA_SUCCESS);
436
437 queuing_error:
438         if (tot_dsds)
439                 scsi_dma_unmap(cmd);
440
441         spin_unlock_irqrestore(&ha->hardware_lock, flags);
442
443         return (QLA_FUNCTION_FAILED);
444 }
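
/*
 * The head-room check above treats the request ring as a circular
 * buffer: when the firmware's out pointer (cnt) is ahead of
 * ring_index, the free space is cnt - ring_index; otherwise it wraps
 * to length - (ring_index - cnt). Illustrative numbers: with
 * length = 128, ring_index = 120 and cnt = 4, free space is
 * 128 - (120 - 4) = 12 entries.
 */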
445
446 /**
447  * qla2x00_start_iocbs() - Execute the IOCB command
448  */
449 void
450 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
451 {
452         struct qla_hw_data *ha = vha->hw;
453         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
454
455         if (IS_P3P_TYPE(ha)) {
456                 qla82xx_start_iocbs(vha);
457         } else {
458                 /* Adjust ring index. */
459                 req->ring_index++;
460                 if (req->ring_index == req->length) {
461                         req->ring_index = 0;
462                         req->ring_ptr = req->ring;
463                 } else
464                         req->ring_ptr++;
465
466                 /* Set chip new ring index. */
467                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
468                         WRT_REG_DWORD(req->req_q_in, req->ring_index);
469                         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
470                 } else if (IS_QLAFX00(ha)) {
471                         WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
472                         RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
473                         QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
474                 } else if (IS_FWI2_CAPABLE(ha)) {
475                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
476                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
477                 } else {
478                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
479                                 req->ring_index);
480                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
481                 }
482         }
483 }
484
485 /**
486  * qla2x00_marker() - Send a marker IOCB to the firmware.
487  * @vha: HA context
488  * @loop_id: loop ID
489  * @lun: LUN
490  * @type: marker modifier
491  *
492  * Can be called from both normal and interrupt context.
493  *
494  * Returns non-zero if a failure occurred, else zero.
495  */
496 static int
497 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
498                         struct rsp_que *rsp, uint16_t loop_id,
499                         uint64_t lun, uint8_t type)
500 {
501         mrk_entry_t *mrk;
502         struct mrk_entry_24xx *mrk24 = NULL;
503
504         struct qla_hw_data *ha = vha->hw;
505         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
506
507         req = ha->req_q_map[0];
508         mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
509         if (mrk == NULL) {
510                 ql_log(ql_log_warn, base_vha, 0x3026,
511                     "Failed to allocate Marker IOCB.\n");
512
513                 return (QLA_FUNCTION_FAILED);
514         }
515
516         mrk->entry_type = MARKER_TYPE;
517         mrk->modifier = type;
518         if (type != MK_SYNC_ALL) {
519                 if (IS_FWI2_CAPABLE(ha)) {
520                         mrk24 = (struct mrk_entry_24xx *) mrk;
521                         mrk24->nport_handle = cpu_to_le16(loop_id);
522                         int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
523                         host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
524                         mrk24->vp_index = vha->vp_idx;
525                         mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
526                 } else {
527                         SET_TARGET_ID(ha, mrk->target, loop_id);
528                         mrk->lun = cpu_to_le16((uint16_t)lun);
529                 }
530         }
531         wmb();
532
533         qla2x00_start_iocbs(vha, req);
534
535         return (QLA_SUCCESS);
536 }
537
538 int
539 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
540                 struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
541                 uint8_t type)
542 {
543         int ret;
544         unsigned long flags = 0;
545
546         spin_lock_irqsave(&vha->hw->hardware_lock, flags);
547         ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
548         spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
549
550         return (ret);
551 }
552
553 /*
554  * qla2x00_issue_marker
555  *
556  * Issue marker
557  * The caller may hold the hardware lock, as indicated by the ha_locked
558  * parameter; the lock might be released and then reacquired.
559  */
560 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
561 {
562         if (ha_locked) {
563                 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
564                                         MK_SYNC_ALL) != QLA_SUCCESS)
565                         return QLA_FUNCTION_FAILED;
566         } else {
567                 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
568                                         MK_SYNC_ALL) != QLA_SUCCESS)
569                         return QLA_FUNCTION_FAILED;
570         }
571         vha->marker_needed = 0;
572
573         return QLA_SUCCESS;
574 }
575
576 static inline int
577 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
578         uint16_t tot_dsds)
579 {
580         uint32_t *cur_dsd = NULL;
581         scsi_qla_host_t *vha;
582         struct qla_hw_data *ha;
583         struct scsi_cmnd *cmd;
584         struct  scatterlist *cur_seg;
585         uint32_t *dsd_seg;
586         void *next_dsd;
587         uint8_t avail_dsds;
588         uint8_t first_iocb = 1;
589         uint32_t dsd_list_len;
590         struct dsd_dma *dsd_ptr;
591         struct ct6_dsd *ctx;
592
593         cmd = GET_CMD_SP(sp);
594
595         /* Update entry type to indicate Command Type 6 IOCB */
596         *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
597
598         /* No data transfer */
599         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
600                 cmd_pkt->byte_count = cpu_to_le32(0);
601                 return 0;
602         }
603
604         vha = sp->vha;
605         ha = vha->hw;
606
607         /* Set transfer direction */
608         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
609                 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
610                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
611                 vha->qla_stats.output_requests++;
612         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
613                 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
614                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
615                 vha->qla_stats.input_requests++;
616         }
617
618         cur_seg = scsi_sglist(cmd);
619         ctx = GET_CMD_CTX_SP(sp);
620
621         while (tot_dsds) {
622                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
623                     QLA_DSDS_PER_IOCB : tot_dsds;
624                 tot_dsds -= avail_dsds;
625                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
626
627                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
628                     struct dsd_dma, list);
629                 next_dsd = dsd_ptr->dsd_addr;
630                 list_del(&dsd_ptr->list);
631                 ha->gbl_dsd_avail--;
632                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
633                 ctx->dsd_use_cnt++;
634                 ha->gbl_dsd_inuse++;
635
636                 if (first_iocb) {
637                         first_iocb = 0;
638                         dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
639                         *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
640                         *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
641                         cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
642                 } else {
643                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
644                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
645                         *cur_dsd++ = cpu_to_le32(dsd_list_len);
646                 }
647                 cur_dsd = (uint32_t *)next_dsd;
648                 while (avail_dsds) {
649                         dma_addr_t      sle_dma;
650
651                         sle_dma = sg_dma_address(cur_seg);
652                         *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
653                         *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
654                         *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
655                         cur_seg = sg_next(cur_seg);
656                         avail_dsds--;
657                 }
658         }
659
660         /* Null termination */
661         *cur_dsd++ =  0;
662         *cur_dsd++ = 0;
663         *cur_dsd++ = 0;
664         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
665         return 0;
666 }
667
668 /*
669  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
670  * for Command Type 6.
671  *
672  * @dsds: number of data segment descriptors needed
673  *
674  * Returns the number of DSD lists needed to store @dsds.
675  */
676 static inline uint16_t
677 qla24xx_calc_dsd_lists(uint16_t dsds)
678 {
679         uint16_t dsd_lists = 0;
680
681         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
682         if (dsds % QLA_DSDS_PER_IOCB)
683                 dsd_lists++;
684         return dsd_lists;
685 }
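
/*
 * Example (symbolic, so it holds for any QLA_DSDS_PER_IOCB value):
 * with dsds == 2 * QLA_DSDS_PER_IOCB + 1, the division yields 2 and
 * the non-zero remainder adds one more, so three DSD lists are
 * needed.
 */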
686
687
688 /**
689  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
690  * IOCB types.
691  *
692  * @sp: SRB command to process
693  * @cmd_pkt: Command type 7 IOCB
694  * @tot_dsds: Total number of segments to transfer
695  * @req: pointer to request queue
696  */
697 inline void
698 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
699         uint16_t tot_dsds, struct req_que *req)
700 {
701         uint16_t        avail_dsds;
702         uint32_t        *cur_dsd;
703         scsi_qla_host_t *vha;
704         struct scsi_cmnd *cmd;
705         struct scatterlist *sg;
706         int i;
707
708         cmd = GET_CMD_SP(sp);
709
710         /* Update entry type to indicate Command Type 7 IOCB */
711         *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
712
713         /* No data transfer */
714         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
715                 cmd_pkt->byte_count = cpu_to_le32(0);
716                 return;
717         }
718
719         vha = sp->vha;
720
721         /* Set transfer direction */
722         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
723                 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
724                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
725                 vha->qla_stats.output_requests++;
726         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
727                 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
728                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
729                 vha->qla_stats.input_requests++;
730         }
731
732         /* One DSD is available in the Command Type 7 IOCB */
733         avail_dsds = 1;
734         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
735
736         /* Load data segments */
737
738         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
739                 dma_addr_t      sle_dma;
740                 cont_a64_entry_t *cont_pkt;
741
742                 /* Allocate additional continuation packets? */
743                 if (avail_dsds == 0) {
744                         /*
745                          * Five DSDs are available in the Continuation
746                          * Type 1 IOCB.
747                          */
748                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
749                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
750                         avail_dsds = 5;
751                 }
752
753                 sle_dma = sg_dma_address(sg);
754                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
755                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
756                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
757                 avail_dsds--;
758         }
759 }
760
761 struct fw_dif_context {
762         uint32_t ref_tag;
763         uint16_t app_tag;
764         uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
765         uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
766 };
767
768 /*
769  * qla24xx_set_t10dif_tags - Extract Ref and App tags from the SCSI command
770  */
772 static inline void
773 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
774     unsigned int protcnt)
775 {
776         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
777
778         switch (scsi_get_prot_type(cmd)) {
779         case SCSI_PROT_DIF_TYPE0:
780                 /*
781                  * No check of ql2xenablehba_err_chk here: if the HBA
782                  * did not generate the ref tag, the I/O would fail.
783                  */
784                 pkt->ref_tag = cpu_to_le32((uint32_t)
785                     (0xffffffff & scsi_get_lba(cmd)));
786
787                 if (!qla2x00_hba_err_chk_enabled(sp))
788                         break;
789
790                 pkt->ref_tag_mask[0] = 0xff;
791                 pkt->ref_tag_mask[1] = 0xff;
792                 pkt->ref_tag_mask[2] = 0xff;
793                 pkt->ref_tag_mask[3] = 0xff;
794                 break;
795
796         /*
797          * For Type 2 protection: 16 bit GUARD tag, plus a 32 bit REF tag
798          * that must match the LBA in the CDB plus the block offset N.
799          */
800         case SCSI_PROT_DIF_TYPE2:
801                 pkt->app_tag = cpu_to_le16(0);
802                 pkt->app_tag_mask[0] = 0x0;
803                 pkt->app_tag_mask[1] = 0x0;
804
805                 pkt->ref_tag = cpu_to_le32((uint32_t)
806                     (0xffffffff & scsi_get_lba(cmd)));
807
808                 if (!qla2x00_hba_err_chk_enabled(sp))
809                         break;
810
811                 /* enable ALL bytes of the ref tag */
812                 pkt->ref_tag_mask[0] = 0xff;
813                 pkt->ref_tag_mask[1] = 0xff;
814                 pkt->ref_tag_mask[2] = 0xff;
815                 pkt->ref_tag_mask[3] = 0xff;
816                 break;
817
818         /* For Type 3 protection: 16 bit GUARD only */
819         case SCSI_PROT_DIF_TYPE3:
820                 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
821                         pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
822                                                                 0x00;
823                 break;
824
825         /*
826          * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
827          * 16 bit app tag.
828          */
829         case SCSI_PROT_DIF_TYPE1:
830                 pkt->ref_tag = cpu_to_le32((uint32_t)
831                     (0xffffffff & scsi_get_lba(cmd)));
832                 pkt->app_tag = cpu_to_le16(0);
833                 pkt->app_tag_mask[0] = 0x0;
834                 pkt->app_tag_mask[1] = 0x0;
835
836                 if (!qla2x00_hba_err_chk_enabled(sp))
837                         break;
838
839                 /* enable ALL bytes of the ref tag */
840                 pkt->ref_tag_mask[0] = 0xff;
841                 pkt->ref_tag_mask[1] = 0xff;
842                 pkt->ref_tag_mask[2] = 0xff;
843                 pkt->ref_tag_mask[3] = 0xff;
844                 break;
845         }
846 }
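
/*
 * Illustrative outcome of the switch above: for a Type 1 or Type 2
 * command at LBA 0x12345678 with HBA error checking enabled, ref_tag
 * is set to 0x12345678, all four ref_tag_mask bytes to 0xff, and the
 * app tag and its mask are cleared; for Type 3 only the guard tag is
 * meaningful, so the ref tag mask is zeroed.
 */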
847
848 int
849 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
850         uint32_t *partial)
851 {
852         struct scatterlist *sg;
853         uint32_t cumulative_partial, sg_len;
854         dma_addr_t sg_dma_addr;
855
856         if (sgx->num_bytes == sgx->tot_bytes)
857                 return 0;
858
859         sg = sgx->cur_sg;
860         cumulative_partial = sgx->tot_partial;
861
862         sg_dma_addr = sg_dma_address(sg);
863         sg_len = sg_dma_len(sg);
864
865         sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
866
867         if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
868                 sgx->dma_len = (blk_sz - cumulative_partial);
869                 sgx->tot_partial = 0;
870                 sgx->num_bytes += blk_sz;
871                 *partial = 0;
872         } else {
873                 sgx->dma_len = sg_len - sgx->bytes_consumed;
874                 sgx->tot_partial += sgx->dma_len;
875                 *partial = 1;
876         }
877
878         sgx->bytes_consumed += sgx->dma_len;
879
880         if (sg_len == sgx->bytes_consumed) {
881                 sg = sg_next(sg);
882                 sgx->num_sg++;
883                 sgx->cur_sg = sg;
884                 sgx->bytes_consumed = 0;
885         }
886
887         return 1;
888 }
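
/*
 * Walk-through (illustrative): with blk_sz = 512 and SG elements of
 * 300 and 212 bytes, the first call returns the 300-byte slice with
 * *partial = 1 and advances to the next element; the second call
 * returns the remaining 212 bytes, completing the 512-byte block, so
 * *partial = 0 and num_bytes grows by blk_sz.
 */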
889
890 int
891 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
892         uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
893 {
894         void *next_dsd;
895         uint8_t avail_dsds = 0;
896         uint32_t dsd_list_len;
897         struct dsd_dma *dsd_ptr;
898         struct scatterlist *sg_prot;
899         uint32_t *cur_dsd = dsd;
900         uint16_t        used_dsds = tot_dsds;
901         uint32_t        prot_int; /* protection interval */
902         uint32_t        partial;
903         struct qla2_sgx sgx;
904         dma_addr_t      sle_dma;
905         uint32_t        sle_dma_len, tot_prot_dma_len = 0;
906         struct scsi_cmnd *cmd;
907
908         memset(&sgx, 0, sizeof(struct qla2_sgx));
909         if (sp) {
910                 cmd = GET_CMD_SP(sp);
911                 prot_int = cmd->device->sector_size;
912
913                 sgx.tot_bytes = scsi_bufflen(cmd);
914                 sgx.cur_sg = scsi_sglist(cmd);
915                 sgx.sp = sp;
916
917                 sg_prot = scsi_prot_sglist(cmd);
918         } else if (tc) {
919                 prot_int      = tc->blk_sz;
920                 sgx.tot_bytes = tc->bufflen;
921                 sgx.cur_sg    = tc->sg;
922                 sg_prot       = tc->prot_sg;
923         } else {
924                 BUG();
925                 return 1;
926         }
927
928         while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
929
930                 sle_dma = sgx.dma_addr;
931                 sle_dma_len = sgx.dma_len;
932 alloc_and_fill:
933                 /* Allocate additional continuation packets? */
934                 if (avail_dsds == 0) {
935                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
936                                         QLA_DSDS_PER_IOCB : used_dsds;
937                         dsd_list_len = (avail_dsds + 1) * 12;
938                         used_dsds -= avail_dsds;
939
940                         /* allocate tracking DS */
941                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
942                         if (!dsd_ptr)
943                                 return 1;
944
945                         /* allocate new list */
946                         dsd_ptr->dsd_addr = next_dsd =
947                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
948                                 &dsd_ptr->dsd_list_dma);
949
950                         if (!next_dsd) {
951                                 /*
952                                  * Need to cleanup only this dsd_ptr, rest
953                                  * will be done by sp_free_dma()
954                                  */
955                                 kfree(dsd_ptr);
956                                 return 1;
957                         }
958
959                         if (sp) {
960                                 list_add_tail(&dsd_ptr->list,
961                                     &((struct crc_context *)
962                                             sp->u.scmd.ctx)->dsd_list);
963
964                                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
965                         } else {
966                                 list_add_tail(&dsd_ptr->list,
967                                     &(tc->ctx->dsd_list));
968                                 *tc->ctx_dsd_alloced = 1;
969                         }
970
971
972                         /* add new list to cmd iocb or last list */
973                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
974                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
975                         *cur_dsd++ = dsd_list_len;
976                         cur_dsd = (uint32_t *)next_dsd;
977                 }
978                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
979                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
980                 *cur_dsd++ = cpu_to_le32(sle_dma_len);
981                 avail_dsds--;
982
983                 if (partial == 0) {
984                         /* Got a full protection interval */
985                         sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
986                         sle_dma_len = 8;
987
988                         tot_prot_dma_len += sle_dma_len;
989                         if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
990                                 tot_prot_dma_len = 0;
991                                 sg_prot = sg_next(sg_prot);
992                         }
993
994                         partial = 1; /* So as to not re-enter this block */
995                         goto alloc_and_fill;
996                 }
997         }
998         /* Null termination */
999         *cur_dsd++ = 0;
1000         *cur_dsd++ = 0;
1001         *cur_dsd++ = 0;
1002         return 0;
1003 }
1004
1005 int
1006 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1007         uint16_t tot_dsds, struct qla_tc_param *tc)
1008 {
1009         void *next_dsd;
1010         uint8_t avail_dsds = 0;
1011         uint32_t dsd_list_len;
1012         struct dsd_dma *dsd_ptr;
1013         struct scatterlist *sg, *sgl;
1014         uint32_t *cur_dsd = dsd;
1015         int     i;
1016         uint16_t        used_dsds = tot_dsds;
1017         struct scsi_cmnd *cmd;
1018
1019         if (sp) {
1020                 cmd = GET_CMD_SP(sp);
1021                 sgl = scsi_sglist(cmd);
1022         } else if (tc) {
1023                 sgl = tc->sg;
1024         } else {
1025                 BUG();
1026                 return 1;
1027         }
1028
1029
1030         for_each_sg(sgl, sg, tot_dsds, i) {
1031                 dma_addr_t      sle_dma;
1032
1033                 /* Allocate additional continuation packets? */
1034                 if (avail_dsds == 0) {
1035                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1036                                         QLA_DSDS_PER_IOCB : used_dsds;
1037                         dsd_list_len = (avail_dsds + 1) * 12;
1038                         used_dsds -= avail_dsds;
1039
1040                         /* allocate tracking DS */
1041                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1042                         if (!dsd_ptr)
1043                                 return 1;
1044
1045                         /* allocate new list */
1046                         dsd_ptr->dsd_addr = next_dsd =
1047                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1048                                 &dsd_ptr->dsd_list_dma);
1049
1050                         if (!next_dsd) {
1051                                 /*
1052                                  * Need to cleanup only this dsd_ptr, rest
1053                                  * will be done by sp_free_dma()
1054                                  */
1055                                 kfree(dsd_ptr);
1056                                 return 1;
1057                         }
1058
1059                         if (sp) {
1060                                 list_add_tail(&dsd_ptr->list,
1061                                     &((struct crc_context *)
1062                                             sp->u.scmd.ctx)->dsd_list);
1063
1064                                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1065                         } else {
1066                                 list_add_tail(&dsd_ptr->list,
1067                                     &(tc->ctx->dsd_list));
1068                                 *tc->ctx_dsd_alloced = 1;
1069                         }
1070
1071                         /* add new list to cmd iocb or last list */
1072                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1073                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1074                         *cur_dsd++ = dsd_list_len;
1075                         cur_dsd = (uint32_t *)next_dsd;
1076                 }
1077                 sle_dma = sg_dma_address(sg);
1078
1079                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1080                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1081                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1082                 avail_dsds--;
1083
1084         }
1085         /* Null termination */
1086         *cur_dsd++ = 0;
1087         *cur_dsd++ = 0;
1088         *cur_dsd++ = 0;
1089         return 0;
1090 }
1091
1092 int
1093 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1094         uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1095 {
1096         void *next_dsd;
1097         uint8_t avail_dsds = 0;
1098         uint32_t dsd_list_len;
1099         struct dsd_dma *dsd_ptr;
1100         struct scatterlist *sg, *sgl;
1101         int     i;
1102         struct scsi_cmnd *cmd;
1103         uint32_t *cur_dsd = dsd;
1104         uint16_t used_dsds = tot_dsds;
1105         struct scsi_qla_host *vha;
1106
1107         if (sp) {
1108                 cmd = GET_CMD_SP(sp);
1109                 sgl = scsi_prot_sglist(cmd);
1110                 vha = sp->vha;
1111         } else if (tc) {
1112                 vha = tc->vha;
1113                 sgl = tc->prot_sg;
1114         } else {
1115                 BUG();
1116                 return 1;
1117         }
1118
1119         ql_dbg(ql_dbg_tgt, vha, 0xe021,
1120                 "%s: enter\n", __func__);
1121
1122         for_each_sg(sgl, sg, tot_dsds, i) {
1123                 dma_addr_t      sle_dma;
1124
1125                 /* Allocate additional continuation packets? */
1126                 if (avail_dsds == 0) {
1127                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1128                                                 QLA_DSDS_PER_IOCB : used_dsds;
1129                         dsd_list_len = (avail_dsds + 1) * 12;
1130                         used_dsds -= avail_dsds;
1131
1132                         /* allocate tracking DS */
1133                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1134                         if (!dsd_ptr)
1135                                 return 1;
1136
1137                         /* allocate new list */
1138                         dsd_ptr->dsd_addr = next_dsd =
1139                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1140                                 &dsd_ptr->dsd_list_dma);
1141
1142                         if (!next_dsd) {
1143                                 /*
1144                                  * Need to cleanup only this dsd_ptr, rest
1145                                  * will be done by sp_free_dma()
1146                                  */
1147                                 kfree(dsd_ptr);
1148                                 return 1;
1149                         }
1150
1151                         if (sp) {
1152                                 list_add_tail(&dsd_ptr->list,
1153                                     &((struct crc_context *)
1154                                             sp->u.scmd.ctx)->dsd_list);
1155
1156                                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1157                         } else {
1158                                 list_add_tail(&dsd_ptr->list,
1159                                     &(tc->ctx->dsd_list));
1160                                 *tc->ctx_dsd_alloced = 1;
1161                         }
1162
1163                         /* add new list to cmd iocb or last list */
1164                         *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1165                         *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1166                         *cur_dsd++ = dsd_list_len;
1167                         cur_dsd = (uint32_t *)next_dsd;
1168                 }
1169                 sle_dma = sg_dma_address(sg);
1170
1171                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1172                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1173                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1174
1175                 avail_dsds--;
1176         }
1177         /* Null termination */
1178         *cur_dsd++ = 0;
1179         *cur_dsd++ = 0;
1180         *cur_dsd++ = 0;
1181         return 0;
1182 }
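
/*
 * All three walk-and-build helpers above chain DSD lists the same
 * way: each list holds avail_dsds 12-byte descriptors (8 bytes of
 * address plus 4 of length) and one extra slot used either to point
 * at the next list or to carry the three-dword null terminator,
 * hence dsd_list_len = (avail_dsds + 1) * 12.
 */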
1183
1184 /**
1185  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1186  *                                                      Type CRC_2 IOCB types.
1187  *
1188  * @sp: SRB command to process
1189  * @cmd_pkt: Command type CRC_2 IOCB
1190  * @tot_dsds: Total number of segments to transfer
1191  */
1192 inline int
1193 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1194     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1195 {
1196         uint32_t                *cur_dsd, *fcp_dl;
1197         scsi_qla_host_t         *vha;
1198         struct scsi_cmnd        *cmd;
1199         uint32_t                total_bytes = 0;
1200         uint32_t                data_bytes;
1201         uint32_t                dif_bytes;
1202         uint8_t                 bundling = 1;
1203         uint16_t                blk_size;
1204         uint8_t                 *clr_ptr;
1205         struct crc_context      *crc_ctx_pkt = NULL;
1206         struct qla_hw_data      *ha;
1207         uint8_t                 additional_fcpcdb_len;
1208         uint16_t                fcp_cmnd_len;
1209         struct fcp_cmnd         *fcp_cmnd;
1210         dma_addr_t              crc_ctx_dma;
1211
1212         cmd = GET_CMD_SP(sp);
1213
1214         /* Update entry type to indicate Command Type CRC_2 IOCB */
1215         *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1216
1217         vha = sp->vha;
1218         ha = vha->hw;
1219
1220         /* No data transfer */
1221         data_bytes = scsi_bufflen(cmd);
1222         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1223                 cmd_pkt->byte_count = cpu_to_le32(0);
1224                 return QLA_SUCCESS;
1225         }
1226
1227         cmd_pkt->vp_index = sp->vha->vp_idx;
1228
1229         /* Set transfer direction */
1230         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1231                 cmd_pkt->control_flags =
1232                     cpu_to_le16(CF_WRITE_DATA);
1233         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1234                 cmd_pkt->control_flags =
1235                     cpu_to_le16(CF_READ_DATA);
1236         }
1237
1238         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1239             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1240             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1241             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1242                 bundling = 0;
1243
1244         /* Allocate CRC context from global pool */
1245         crc_ctx_pkt = sp->u.scmd.ctx =
1246             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1247
1248         if (!crc_ctx_pkt)
1249                 goto crc_queuing_error;
1250
1251         /* Zero out CTX area. */
1252         clr_ptr = (uint8_t *)crc_ctx_pkt;
1253         memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1254
1255         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1256
1257         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1258
1259         /* Set handle */
1260         crc_ctx_pkt->handle = cmd_pkt->handle;
1261
1262         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1263
1264         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1265             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1266
1267         cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1268         cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1269         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1270
1271         /* Determine SCSI command length -- align to 4 byte boundary */
1272         if (cmd->cmd_len > 16) {
1273                 additional_fcpcdb_len = cmd->cmd_len - 16;
1274                 if ((cmd->cmd_len % 4) != 0) {
1275                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1276                         goto crc_queuing_error;
1277                 }
1278                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1279         } else {
1280                 additional_fcpcdb_len = 0;
1281                 fcp_cmnd_len = 12 + 16 + 4;
1282         }
1283
1284         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1285
1286         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1287         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1288                 fcp_cmnd->additional_cdb_len |= 1;
1289         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1290                 fcp_cmnd->additional_cdb_len |= 2;
1291
1292         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1293         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1294         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1295         cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1296             LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1297         cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1298             MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1299         fcp_cmnd->task_management = 0;
1300         fcp_cmnd->task_attribute = TSK_SIMPLE;
1301
1302         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1303
1304         /* Compute dif len and adjust data len to include protection */
1305         dif_bytes = 0;
1306         blk_size = cmd->device->sector_size;
1307         dif_bytes = (data_bytes / blk_size) * 8;
1308
1309         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1310         case SCSI_PROT_READ_INSERT:
1311         case SCSI_PROT_WRITE_STRIP:
1312             total_bytes = data_bytes;
1313             data_bytes += dif_bytes;
1314             break;
1315
1316         case SCSI_PROT_READ_STRIP:
1317         case SCSI_PROT_WRITE_INSERT:
1318         case SCSI_PROT_READ_PASS:
1319         case SCSI_PROT_WRITE_PASS:
1320             total_bytes = data_bytes + dif_bytes;
1321             break;
1322         default:
1323             BUG();
1324         }
1325
1326         if (!qla2x00_hba_err_chk_enabled(sp))
1327                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1328         /* HBA error checking enabled */
1329         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1330                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1331                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1332                         SCSI_PROT_DIF_TYPE2))
1333                         fw_prot_opts |= BIT_10;
1334                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1335                     SCSI_PROT_DIF_TYPE3)
1336                         fw_prot_opts |= BIT_11;
1337         }
1338
1339         if (!bundling) {
1340                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1341         } else {
1342                 /*
1343                  * Configure bundling if we need to fetch interleaving
1344                  * protection data with separate PCI accesses.
1345                  */
1346                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1347                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1348                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1349                                                         tot_prot_dsds);
1350                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1351         }
1352
1353         /* Finish the common fields of CRC pkt */
1354         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1355         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1356         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1357         crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1358         /* Fibre channel byte count */
1359         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1360         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1361             additional_fcpcdb_len);
1362         *fcp_dl = htonl(total_bytes);
1363
1364         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1365                 cmd_pkt->byte_count = cpu_to_le32(0);
1366                 return QLA_SUCCESS;
1367         }
1368         /* Walks data segments */
1369
1370         cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1371
1372         if (!bundling && tot_prot_dsds) {
1373                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1374                         cur_dsd, tot_dsds, NULL))
1375                         goto crc_queuing_error;
1376         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1377                         (tot_dsds - tot_prot_dsds), NULL))
1378                 goto crc_queuing_error;
1379
1380         if (bundling && tot_prot_dsds) {
1381                 /* Walks dif segments */
1382                 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1383                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1384                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1385                                 tot_prot_dsds, NULL))
1386                         goto crc_queuing_error;
1387         }
1388         return QLA_SUCCESS;
1389
1390 crc_queuing_error:
1391         /* Cleanup will be performed by the caller */
1392
1393         return QLA_FUNCTION_FAILED;
1394 }
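
/*
 * Worked example for the byte accounting above (illustrative): a
 * 4096-byte transfer with 512-byte sectors carries
 * (4096 / 512) * 8 = 64 bytes of DIF data. For a READ_PASS or
 * WRITE_PASS operation total_bytes = 4096 + 64 = 4160, and that
 * value lands in both cmd_pkt->byte_count and the fcp_dl field.
 */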
1395
1396 /**
1397  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1398  * @sp: command to send to the ISP
1399  *
1400  * Returns non-zero if a failure occurred, else zero.
1401  */
1402 int
1403 qla24xx_start_scsi(srb_t *sp)
1404 {
1405         int             nseg;
1406         unsigned long   flags;
1407         uint32_t        *clr_ptr;
1408         uint32_t        index;
1409         uint32_t        handle;
1410         struct cmd_type_7 *cmd_pkt;
1411         uint16_t        cnt;
1412         uint16_t        req_cnt;
1413         uint16_t        tot_dsds;
1414         struct req_que *req = NULL;
1415         struct rsp_que *rsp = NULL;
1416         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1417         struct scsi_qla_host *vha = sp->vha;
1418         struct qla_hw_data *ha = vha->hw;
1419
1420         /* Setup device pointers. */
1421         req = vha->req;
1422         rsp = req->rsp;
1423
1424         /* So we know we haven't pci_map'ed anything yet */
1425         tot_dsds = 0;
1426
1427         /* Send marker if required */
1428         if (vha->marker_needed != 0) {
1429                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1430                     QLA_SUCCESS)
1431                         return QLA_FUNCTION_FAILED;
1432                 vha->marker_needed = 0;
1433         }
1434
1435         /* Acquire ring specific lock */
1436         spin_lock_irqsave(&ha->hardware_lock, flags);
1437
1438         /* Check for room in outstanding command list. */
1439         handle = req->current_outstanding_cmd;
1440         for (index = 1; index < req->num_outstanding_cmds; index++) {
1441                 handle++;
1442                 if (handle == req->num_outstanding_cmds)
1443                         handle = 1;
1444                 if (!req->outstanding_cmds[handle])
1445                         break;
1446         }
1447         if (index == req->num_outstanding_cmds)
1448                 goto queuing_error;
1449
1450         /* Map the sg table so we have an accurate count of sg entries needed */
1451         if (scsi_sg_count(cmd)) {
1452                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1453                     scsi_sg_count(cmd), cmd->sc_data_direction);
1454                 if (unlikely(!nseg))
1455                         goto queuing_error;
1456         } else
1457                 nseg = 0;
1458
1459         tot_dsds = nseg;
1460         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1461         if (req->cnt < (req_cnt + 2)) {
1462                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1463                     RD_REG_DWORD_RELAXED(req->req_q_out);
1464                 if (req->ring_index < cnt)
1465                         req->cnt = cnt - req->ring_index;
1466                 else
1467                         req->cnt = req->length -
1468                                 (req->ring_index - cnt);
1469                 if (req->cnt < (req_cnt + 2))
1470                         goto queuing_error;
1471         }
1472
1473         /* Build command packet. */
1474         req->current_outstanding_cmd = handle;
1475         req->outstanding_cmds[handle] = sp;
1476         sp->handle = handle;
1477         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1478         req->cnt -= req_cnt;
1479
1480         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1481         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1482
1483         /* Zero out remaining portion of packet. */
1484         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1485         clr_ptr = (uint32_t *)cmd_pkt + 2;
1486         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1487         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1488
1489         /* Set NPORT-ID and LUN number */
1490         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1491         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1492         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1493         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1494         cmd_pkt->vp_index = sp->vha->vp_idx;
1495
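        /* The FCP LUN field is big-endian on the wire; build it, then swap. */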
1496         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1497         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1498
1499         cmd_pkt->task = TSK_SIMPLE;
1500
1501         /* Load SCSI command packet. */
1502         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1503         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1504
1505         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1506
1507         /* Build IOCB segments */
1508         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1509
1510         /* Set total data segment count. */
1511         cmd_pkt->entry_count = (uint8_t)req_cnt;
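        /* Make the IOCB contents visible before publishing the new entry. */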
1512         wmb();
1513         /* Adjust ring index. */
1514         req->ring_index++;
1515         if (req->ring_index == req->length) {
1516                 req->ring_index = 0;
1517                 req->ring_ptr = req->ring;
1518         } else
1519                 req->ring_ptr++;
1520
1521         sp->flags |= SRB_DMA_VALID;
1522
1523         /* Set chip new ring index. */
1524         WRT_REG_DWORD(req->req_q_in, req->ring_index);
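        /* Read back to flush the posted doorbell write to the adapter. */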
1525         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1526
1527         /* Manage unprocessed RIO/ZIO commands in response queue. */
1528         if (vha->flags.process_response_queue &&
1529                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1530                 qla24xx_process_response_queue(vha, rsp);
1531
1532         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1533         return QLA_SUCCESS;
1534
1535 queuing_error:
1536         if (tot_dsds)
1537                 scsi_dma_unmap(cmd);
1538
1539         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1540
1541         return QLA_FUNCTION_FAILED;
1542 }
1543
1544 /**
1545  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1546  * @sp: command to send to the ISP
1547  *
1548  * Returns non-zero if a failure occurred, else zero.
1549  */
1550 int
1551 qla24xx_dif_start_scsi(srb_t *sp)
1552 {
1553         int                     nseg;
1554         unsigned long           flags;
1555         uint32_t                *clr_ptr;
1556         uint32_t                index;
1557         uint32_t                handle;
1558         uint16_t                cnt;
1559         uint16_t                req_cnt = 0;
1560         uint16_t                tot_dsds;
1561         uint16_t                tot_prot_dsds;
1562         uint16_t                fw_prot_opts = 0;
1563         struct req_que          *req = NULL;
1564         struct rsp_que          *rsp = NULL;
1565         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1566         struct scsi_qla_host    *vha = sp->vha;
1567         struct qla_hw_data      *ha = vha->hw;
1568         struct cmd_type_crc_2   *cmd_pkt;
1569         uint32_t                status = 0;
1570
1571 #define QDSS_GOT_Q_SPACE        BIT_0
1572
1573         /* Only process protection I/Os or CDBs longer than 16 bytes here */
1574         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1575                 if (cmd->cmd_len <= 16)
1576                         return qla24xx_start_scsi(sp);
1577         }
1578
1579         /* Setup device pointers. */
1580         req = vha->req;
1581         rsp = req->rsp;
1582
1583         /* So we know we haven't pci_map'ed anything yet */
1584         tot_dsds = 0;
1585
1586         /* Send marker if required */
1587         if (vha->marker_needed != 0) {
1588                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1589                     QLA_SUCCESS)
1590                         return QLA_FUNCTION_FAILED;
1591                 vha->marker_needed = 0;
1592         }
1593
1594         /* Acquire ring specific lock */
1595         spin_lock_irqsave(&ha->hardware_lock, flags);
1596
1597         /* Check for room in outstanding command list. */
1598         handle = req->current_outstanding_cmd;
1599         for (index = 1; index < req->num_outstanding_cmds; index++) {
1600                 handle++;
1601                 if (handle == req->num_outstanding_cmds)
1602                         handle = 1;
1603                 if (!req->outstanding_cmds[handle])
1604                         break;
1605         }
1606
1607         if (index == req->num_outstanding_cmds)
1608                 goto queuing_error;
1609
1610         /* Compute number of required data segments */
1611         /* Map the sg table so we have an accurate count of sg entries needed */
1612         if (scsi_sg_count(cmd)) {
1613                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1614                     scsi_sg_count(cmd), cmd->sc_data_direction);
1615                 if (unlikely(!nseg))
1616                         goto queuing_error;
1617                 else
1618                         sp->flags |= SRB_DMA_VALID;
1619
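                /*
                 * When the HBA inserts or strips the protection data
                 * itself, the transfer is carved into sector-sized
                 * blocks, so walk the S/G list one block at a time to
                 * get the real descriptor count.
                 */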
1620                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1621                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1622                         struct qla2_sgx sgx;
1623                         uint32_t        partial;
1624
1625                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1626                         sgx.tot_bytes = scsi_bufflen(cmd);
1627                         sgx.cur_sg = scsi_sglist(cmd);
1628                         sgx.sp = sp;
1629
1630                         nseg = 0;
1631                         while (qla24xx_get_one_block_sg(
1632                             cmd->device->sector_size, &sgx, &partial))
1633                                 nseg++;
1634                 }
1635         } else
1636                 nseg = 0;
1637
1638         /* number of required data segments */
1639         tot_dsds = nseg;
1640
1641         /* Compute number of required protection segments */
1642         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1643                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1644                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1645                 if (unlikely(!nseg))
1646                         goto queuing_error;
1647                 else
1648                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1649
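                /*
                 * For insert/strip there is one protection DSD per
                 * logical block rather than per mapped S/G element.
                 */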
1650                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1651                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1652                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1653                 }
1654         } else {
1655                 nseg = 0;
1656         }
1657
1658         req_cnt = 1;
1659         /* Total Data and protection sg segment(s) */
1660         tot_prot_dsds = nseg;
1661         tot_dsds += nseg;
1662         if (req->cnt < (req_cnt + 2)) {
1663                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1664                     RD_REG_DWORD_RELAXED(req->req_q_out);
1665                 if (req->ring_index < cnt)
1666                         req->cnt = cnt - req->ring_index;
1667                 else
1668                         req->cnt = req->length -
1669                                 (req->ring_index - cnt);
1670                 if (req->cnt < (req_cnt + 2))
1671                         goto queuing_error;
1672         }
1673
1674         status |= QDSS_GOT_Q_SPACE;
1675
1676         /* Build header part of command packet (excluding the OPCODE). */
1677         req->current_outstanding_cmd = handle;
1678         req->outstanding_cmds[handle] = sp;
1679         sp->handle = handle;
1680         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1681         req->cnt -= req_cnt;
1682
1683         /* Fill-in common area */
1684         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1685         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1686
1687         clr_ptr = (uint32_t *)cmd_pkt + 2;
1688         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1689
1690         /* Set NPORT-ID and LUN number */
1691         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1692         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1693         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1694         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1695
1696         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1697         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1698
1699         /* Total Data and protection segment(s) */
1700         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1701
1702         /* Build IOCB segments and adjust for data protection segments */
1703         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1704             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1705                 QLA_SUCCESS)
1706                 goto queuing_error;
1707
1708         cmd_pkt->entry_count = (uint8_t)req_cnt;
1709         /* Specify response queue number where completion should happen */
1710         cmd_pkt->entry_status = (uint8_t) rsp->id;
1711         cmd_pkt->timeout = cpu_to_le16(0);
1712         wmb();
1713
1714         /* Adjust ring index. */
1715         req->ring_index++;
1716         if (req->ring_index == req->length) {
1717                 req->ring_index = 0;
1718                 req->ring_ptr = req->ring;
1719         } else
1720                 req->ring_ptr++;
1721
1722         /* Set chip new ring index. */
1723         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1724         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1725
1726         /* Manage unprocessed RIO/ZIO commands in response queue. */
1727         if (vha->flags.process_response_queue &&
1728             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1729                 qla24xx_process_response_queue(vha, rsp);
1730
1731         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1732
1733         return QLA_SUCCESS;
1734
1735 queuing_error:
1736         if (status & QDSS_GOT_Q_SPACE) {
1737                 req->outstanding_cmds[handle] = NULL;
1738                 req->cnt += req_cnt;
1739         }
1740         /* Cleanup will be performed by the caller (queuecommand) */
1741
1742         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1743         return QLA_FUNCTION_FAILED;
1744 }
1745
1746 /**
1747  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1748  * @sp: command to send to the ISP
1749  *
1750  * Returns non-zero if a failure occurred, else zero.
1751  */
1752 static int
1753 qla2xxx_start_scsi_mq(srb_t *sp)
1754 {
1755         int             nseg;
1756         unsigned long   flags;
1757         uint32_t        *clr_ptr;
1758         uint32_t        index;
1759         uint32_t        handle;
1760         struct cmd_type_7 *cmd_pkt;
1761         uint16_t        cnt;
1762         uint16_t        req_cnt;
1763         uint16_t        tot_dsds;
1764         struct req_que *req = NULL;
1765         struct rsp_que *rsp = NULL;
1766         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1767         struct scsi_qla_host *vha = sp->fcport->vha;
1768         struct qla_hw_data *ha = vha->hw;
1769         struct qla_qpair *qpair = sp->qpair;
1770
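        /*
         * The multiqueue variant drives a dedicated qpair: it uses the
         * qpair's own request/response rings and qp_lock rather than the
         * adapter-wide hardware_lock.
         */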
1771         /* Setup qpair pointers */
1772         rsp = qpair->rsp;
1773         req = qpair->req;
1774
1775         /* So we know we haven't pci_map'ed anything yet */
1776         tot_dsds = 0;
1777
1778         /* Send marker if required */
1779         if (vha->marker_needed != 0) {
1780                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1781                     QLA_SUCCESS)
1782                         return QLA_FUNCTION_FAILED;
1783                 vha->marker_needed = 0;
1784         }
1785
1786         /* Acquire qpair specific lock */
1787         spin_lock_irqsave(&qpair->qp_lock, flags);
1788
1789         /* Check for room in outstanding command list. */
1790         handle = req->current_outstanding_cmd;
1791         for (index = 1; index < req->num_outstanding_cmds; index++) {
1792                 handle++;
1793                 if (handle == req->num_outstanding_cmds)
1794                         handle = 1;
1795                 if (!req->outstanding_cmds[handle])
1796                         break;
1797         }
1798         if (index == req->num_outstanding_cmds)
1799                 goto queuing_error;
1800
1801         /* Map the sg table so we have an accurate count of sg entries needed */
1802         if (scsi_sg_count(cmd)) {
1803                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1804                     scsi_sg_count(cmd), cmd->sc_data_direction);
1805                 if (unlikely(!nseg))
1806                         goto queuing_error;
1807         } else
1808                 nseg = 0;
1809
1810         tot_dsds = nseg;
1811         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1812         if (req->cnt < (req_cnt + 2)) {
1813                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1814                     RD_REG_DWORD_RELAXED(req->req_q_out);
1815                 if (req->ring_index < cnt)
1816                         req->cnt = cnt - req->ring_index;
1817                 else
1818                         req->cnt = req->length -
1819                                 (req->ring_index - cnt);
1820                 if (req->cnt < (req_cnt + 2))
1821                         goto queuing_error;
1822         }
1823
1824         /* Build command packet. */
1825         req->current_outstanding_cmd = handle;
1826         req->outstanding_cmds[handle] = sp;
1827         sp->handle = handle;
1828         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1829         req->cnt -= req_cnt;
1830
1831         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1832         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1833
1834         /* Zero out remaining portion of packet. */
1835         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1836         clr_ptr = (uint32_t *)cmd_pkt + 2;
1837         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1838         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1839
1840         /* Set NPORT-ID and LUN number */
1841         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1842         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1843         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1844         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1845         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1846
1847         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1848         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1849
1850         cmd_pkt->task = TSK_SIMPLE;
1851
1852         /* Load SCSI command packet. */
1853         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1854         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1855
1856         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1857
1858         /* Build IOCB segments */
1859         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1860
1861         /* Set total data segment count. */
1862         cmd_pkt->entry_count = (uint8_t)req_cnt;
1863         wmb();
1864         /* Adjust ring index. */
1865         req->ring_index++;
1866         if (req->ring_index == req->length) {
1867                 req->ring_index = 0;
1868                 req->ring_ptr = req->ring;
1869         } else
1870                 req->ring_ptr++;
1871
1872         sp->flags |= SRB_DMA_VALID;
1873
1874         /* Set chip new ring index. */
1875         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1876
1877         /* Manage unprocessed RIO/ZIO commands in response queue. */
1878         if (vha->flags.process_response_queue &&
1879                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1880                 qla24xx_process_response_queue(vha, rsp);
1881
1882         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1883         return QLA_SUCCESS;
1884
1885 queuing_error:
1886         if (tot_dsds)
1887                 scsi_dma_unmap(cmd);
1888
1889         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1890
1891         return QLA_FUNCTION_FAILED;
1892 }
1893
1895 /**
1896  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1897  * @sp: command to send to the ISP
1898  *
1899  * Returns non-zero if a failure occurred, else zero.
1900  */
1901 int
1902 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1903 {
1904         int                     nseg;
1905         unsigned long           flags;
1906         uint32_t                *clr_ptr;
1907         uint32_t                index;
1908         uint32_t                handle;
1909         uint16_t                cnt;
1910         uint16_t                req_cnt = 0;
1911         uint16_t                tot_dsds;
1912         uint16_t                tot_prot_dsds;
1913         uint16_t                fw_prot_opts = 0;
1914         struct req_que          *req = NULL;
1915         struct rsp_que          *rsp = NULL;
1916         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1917         struct scsi_qla_host    *vha = sp->fcport->vha;
1918         struct qla_hw_data      *ha = vha->hw;
1919         struct cmd_type_crc_2   *cmd_pkt;
1920         uint32_t                status = 0;
1921         struct qla_qpair        *qpair = sp->qpair;
1922
1923 #define QDSS_GOT_Q_SPACE        BIT_0
1924
1925         /* Check for host side state */
1926         if (!qpair->online) {
1927                 cmd->result = DID_NO_CONNECT << 16;
1928                 return QLA_INTERFACE_ERROR;
1929         }
1930
1931         if (!qpair->difdix_supported &&
1932                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1933                 cmd->result = DID_NO_CONNECT << 16;
1934                 return QLA_INTERFACE_ERROR;
1935         }
1936
1937         /* Only process protection I/Os or CDBs longer than 16 bytes here */
1938         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1939                 if (cmd->cmd_len <= 16)
1940                         return qla2xxx_start_scsi_mq(sp);
1941         }
1942
1943         /* Setup qpair pointers */
1944         rsp = qpair->rsp;
1945         req = qpair->req;
1946
1947         /* So we know we haven't pci_map'ed anything yet */
1948         tot_dsds = 0;
1949
1950         /* Send marker if required */
1951         if (vha->marker_needed != 0) {
1952                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1953                     QLA_SUCCESS)
1954                         return QLA_FUNCTION_FAILED;
1955                 vha->marker_needed = 0;
1956         }
1957
1958         /* Acquire ring specific lock */
1959         spin_lock_irqsave(&qpair->qp_lock, flags);
1960
1961         /* Check for room in outstanding command list. */
1962         handle = req->current_outstanding_cmd;
1963         for (index = 1; index < req->num_outstanding_cmds; index++) {
1964                 handle++;
1965                 if (handle == req->num_outstanding_cmds)
1966                         handle = 1;
1967                 if (!req->outstanding_cmds[handle])
1968                         break;
1969         }
1970
1971         if (index == req->num_outstanding_cmds)
1972                 goto queuing_error;
1973
1974         /* Compute number of required data segments */
1975         /* Map the sg table so we have an accurate count of sg entries needed */
1976         if (scsi_sg_count(cmd)) {
1977                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1978                     scsi_sg_count(cmd), cmd->sc_data_direction);
1979                 if (unlikely(!nseg))
1980                         goto queuing_error;
1981                 else
1982                         sp->flags |= SRB_DMA_VALID;
1983
1984                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1985                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1986                         struct qla2_sgx sgx;
1987                         uint32_t        partial;
1988
1989                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1990                         sgx.tot_bytes = scsi_bufflen(cmd);
1991                         sgx.cur_sg = scsi_sglist(cmd);
1992                         sgx.sp = sp;
1993
1994                         nseg = 0;
1995                         while (qla24xx_get_one_block_sg(
1996                             cmd->device->sector_size, &sgx, &partial))
1997                                 nseg++;
1998                 }
1999         } else
2000                 nseg = 0;
2001
2002         /* number of required data segments */
2003         tot_dsds = nseg;
2004
2005         /* Compute number of required protection segments */
2006         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2007                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2008                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2009                 if (unlikely(!nseg))
2010                         goto queuing_error;
2011                 else
2012                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2013
2014                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2015                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2016                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2017                 }
2018         } else {
2019                 nseg = 0;
2020         }
2021
2022         req_cnt = 1;
2023         /* Total Data and protection sg segment(s) */
2024         tot_prot_dsds = nseg;
2025         tot_dsds += nseg;
2026         if (req->cnt < (req_cnt + 2)) {
2027                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2028                     RD_REG_DWORD_RELAXED(req->req_q_out);
2029                 if (req->ring_index < cnt)
2030                         req->cnt = cnt - req->ring_index;
2031                 else
2032                         req->cnt = req->length -
2033                                 (req->ring_index - cnt);
2034                 if (req->cnt < (req_cnt + 2))
2035                         goto queuing_error;
2036         }
2037
2038         status |= QDSS_GOT_Q_SPACE;
2039
2040         /* Build header part of command packet (excluding the OPCODE). */
2041         req->current_outstanding_cmd = handle;
2042         req->outstanding_cmds[handle] = sp;
2043         sp->handle = handle;
2044         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2045         req->cnt -= req_cnt;
2046
2047         /* Fill-in common area */
2048         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2049         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2050
2051         clr_ptr = (uint32_t *)cmd_pkt + 2;
2052         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2053
2054         /* Set NPORT-ID and LUN number */
2055         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2056         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2057         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2058         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2059
2060         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2061         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2062
2063         /* Total Data and protection segment(s) */
2064         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2065
2066         /* Build IOCB segments and adjust for data protection segments */
2067         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2068             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2069                 QLA_SUCCESS)
2070                 goto queuing_error;
2071
2072         cmd_pkt->entry_count = (uint8_t)req_cnt;
2073         cmd_pkt->timeout = cpu_to_le16(0);
2074         wmb();
2075
2076         /* Adjust ring index. */
2077         req->ring_index++;
2078         if (req->ring_index == req->length) {
2079                 req->ring_index = 0;
2080                 req->ring_ptr = req->ring;
2081         } else
2082                 req->ring_ptr++;
2083
2084         /* Set chip new ring index. */
2085         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2086
2087         /* Manage unprocessed RIO/ZIO commands in response queue. */
2088         if (vha->flags.process_response_queue &&
2089             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2090                 qla24xx_process_response_queue(vha, rsp);
2091
2092         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2093
2094         return QLA_SUCCESS;
2095
2096 queuing_error:
2097         if (status & QDSS_GOT_Q_SPACE) {
2098                 req->outstanding_cmds[handle] = NULL;
2099                 req->cnt += req_cnt;
2100         }
2101         /* Cleanup will be performed by the caller (queuecommand) */
2102
2103         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2104         return QLA_FUNCTION_FAILED;
2105 }
2106
2107 /* Generic Control-SRB manipulation functions. */
2108
2109 /* hardware_lock assumed to be held. */
2110 void *
2111 qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
2112 {
2113         if (qla2x00_reset_active(vha))
2114                 return NULL;
2115
2116         return qla2x00_alloc_iocbs(vha, sp);
2117 }
2118
2119 void *
2120 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
2121 {
2122         struct qla_hw_data *ha = vha->hw;
2123         struct req_que *req = ha->req_q_map[0];
2124         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2125         uint32_t index, handle;
2126         request_t *pkt;
2127         uint16_t cnt, req_cnt;
2128
2129         pkt = NULL;
2130         req_cnt = 1;
2131         handle = 0;
2132
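        /*
         * A NULL sp means the caller only needs ring space (e.g. for a
         * marker), not a slot in the outstanding command array.
         */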
2133         if (!sp)
2134                 goto skip_cmd_array;
2135
2136         /* Check for room in outstanding command list. */
2137         handle = req->current_outstanding_cmd;
2138         for (index = 1; index < req->num_outstanding_cmds; index++) {
2139                 handle++;
2140                 if (handle == req->num_outstanding_cmds)
2141                         handle = 1;
2142                 if (!req->outstanding_cmds[handle])
2143                         break;
2144         }
2145         if (index == req->num_outstanding_cmds) {
2146                 ql_log(ql_log_warn, vha, 0x700b,
2147                     "No room on outstanding cmd array.\n");
2148                 goto queuing_error;
2149         }
2150
2151         /* Prep command array. */
2152         req->current_outstanding_cmd = handle;
2153         req->outstanding_cmds[handle] = sp;
2154         sp->handle = handle;
2155
2156         /* Adjust entry-counts as needed. */
2157         if (sp->type != SRB_SCSI_CMD)
2158                 req_cnt = sp->iocbs;
2159
2160 skip_cmd_array:
2161         /* Check for room on request queue. */
2162         if (req->cnt < req_cnt + 2) {
2163                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2164                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2165                 else if (IS_P3P_TYPE(ha))
2166                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2167                 else if (IS_FWI2_CAPABLE(ha))
2168                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2169                 else if (IS_QLAFX00(ha))
2170                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2171                 else
2172                         cnt = qla2x00_debounce_register(
2173                             ISP_REQ_Q_OUT(ha, &reg->isp));
2174
2175                 if (req->ring_index < cnt)
2176                         req->cnt = cnt - req->ring_index;
2177                 else
2178                         req->cnt = req->length -
2179                             (req->ring_index - cnt);
2180         }
2181         if (req->cnt < req_cnt + 2)
2182                 goto queuing_error;
2183
2184         /* Prep packet */
2185         req->cnt -= req_cnt;
2186         pkt = req->ring_ptr;
2187         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2188         if (IS_QLAFX00(ha)) {
2189                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2190                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2191         } else {
2192                 pkt->entry_count = req_cnt;
2193                 pkt->handle = handle;
2194         }
2195
2196         return pkt;
2197
2198 queuing_error:
2199         vha->tgt_counters.num_alloc_iocb_failed++;
2200         return pkt;
2201 }
2200
2201 static void
2202 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2203 {
2204         struct srb_iocb *lio = &sp->u.iocb_cmd;
2205
2206         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2207         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2208         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2209                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2210         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2211                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2212         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2213         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2214         logio->port_id[1] = sp->fcport->d_id.b.area;
2215         logio->port_id[2] = sp->fcport->d_id.b.domain;
2216         logio->vp_index = sp->vha->vp_idx;
2217 }
2218
2219 static void
2220 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2221 {
2222         struct qla_hw_data *ha = sp->vha->hw;
2223         struct srb_iocb *lio = &sp->u.iocb_cmd;
2224         uint16_t opts;
2225
2226         mbx->entry_type = MBX_IOCB_TYPE;
2227         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2228         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2229         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2230         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2231         if (HAS_EXTENDED_IDS(ha)) {
2232                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2233                 mbx->mb10 = cpu_to_le16(opts);
2234         } else {
2235                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2236         }
2237         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2238         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2239             sp->fcport->d_id.b.al_pa);
2240         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2241 }
2242
2243 static void
2244 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2245 {
2246         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2247         logio->control_flags =
2248             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2249         if (!sp->fcport->se_sess ||
2250             !sp->fcport->keep_nport_handle)
2251                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2252         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2253         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2254         logio->port_id[1] = sp->fcport->d_id.b.area;
2255         logio->port_id[2] = sp->fcport->d_id.b.domain;
2256         logio->vp_index = sp->vha->vp_idx;
2257 }
2258
2259 static void
2260 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2261 {
2262         struct qla_hw_data *ha = sp->vha->hw;
2263
2264         mbx->entry_type = MBX_IOCB_TYPE;
2265         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2266         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2267         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2268             cpu_to_le16(sp->fcport->loop_id):
2269             cpu_to_le16(sp->fcport->loop_id << 8);
2270         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2271         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2272             sp->fcport->d_id.b.al_pa);
2273         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2274         /* Implicit: mbx->mb10 = 0. */
2275 }
2276
2277 static void
2278 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2279 {
2280         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2281         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2282         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2283         logio->vp_index = sp->vha->vp_idx;
2284 }
2285
2286 static void
2287 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2288 {
2289         struct qla_hw_data *ha = sp->vha->hw;
2290
2291         mbx->entry_type = MBX_IOCB_TYPE;
2292         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2293         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2294         if (HAS_EXTENDED_IDS(ha)) {
2295                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2296                 mbx->mb10 = cpu_to_le16(BIT_0);
2297         } else {
2298                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2299         }
2300         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2301         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2302         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2303         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2304         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2305 }
2306
2307 static void
2308 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2309 {
2310         uint32_t flags;
2311         uint64_t lun;
2312         struct fc_port *fcport = sp->fcport;
2313         scsi_qla_host_t *vha = fcport->vha;
2314         struct qla_hw_data *ha = vha->hw;
2315         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2316         struct req_que *req = vha->req;
2317
2318         flags = iocb->u.tmf.flags;
2319         lun = iocb->u.tmf.lun;
2320
2321         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2322         tsk->entry_count = 1;
2323         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2324         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
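        /*
         * Task-management timeout: r_a_tov is apparently kept in 100 ms
         * units, so this programs roughly 2 * R_A_TOV in seconds.
         */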
2325         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2326         tsk->control_flags = cpu_to_le32(flags);
2327         tsk->port_id[0] = fcport->d_id.b.al_pa;
2328         tsk->port_id[1] = fcport->d_id.b.area;
2329         tsk->port_id[2] = fcport->d_id.b.domain;
2330         tsk->vp_index = fcport->vha->vp_idx;
2331
2332         if (flags == TCF_LUN_RESET) {
2333                 int_to_scsilun(lun, &tsk->lun);
2334                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2335                         sizeof(tsk->lun));
2336         }
2337 }
2338
2339 static void
2340 qla2x00_els_dcmd_sp_free(void *data)
2341 {
2342         srb_t *sp = data;
2343         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2344
2345         kfree(sp->fcport);
2346
2347         if (elsio->u.els_logo.els_logo_pyld)
2348                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2349                     elsio->u.els_logo.els_logo_pyld,
2350                     elsio->u.els_logo.els_logo_pyld_dma);
2351
2352         del_timer(&elsio->timer);
2353         qla2x00_rel_sp(sp);
2354 }
2355
2356 static void
2357 qla2x00_els_dcmd_iocb_timeout(void *data)
2358 {
2359         srb_t *sp = data;
2360         fc_port_t *fcport = sp->fcport;
2361         struct scsi_qla_host *vha = sp->vha;
2362         struct qla_hw_data *ha = vha->hw;
2363         struct srb_iocb *lio = &sp->u.iocb_cmd;
2364         unsigned long flags = 0;
2365
2366         ql_dbg(ql_dbg_io, vha, 0x3069,
2367             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2368             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2369             fcport->d_id.b.al_pa);
2370
2371         /* Abort the exchange */
2372         spin_lock_irqsave(&ha->hardware_lock, flags);
2373         if (ha->isp_ops->abort_command(sp)) {
2374                 ql_dbg(ql_dbg_io, vha, 0x3070,
2375                     "mbx abort_command failed.\n");
2376         } else {
2377                 ql_dbg(ql_dbg_io, vha, 0x3071,
2378                     "mbx abort_command success.\n");
2379         }
2380         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2381
2382         complete(&lio->u.els_logo.comp);
2383 }
2384
2385 static void
2386 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2387 {
2388         srb_t *sp = ptr;
2389         fc_port_t *fcport = sp->fcport;
2390         struct srb_iocb *lio = &sp->u.iocb_cmd;
2391         struct scsi_qla_host *vha = sp->vha;
2392
2393         ql_dbg(ql_dbg_io, vha, 0x3072,
2394             "%s hdl=%x, portid=%02x%02x%02x done\n",
2395             sp->name, sp->handle, fcport->d_id.b.domain,
2396             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2397
2398         complete(&lio->u.els_logo.comp);
2399 }
2400
2401 int
2402 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2403     port_id_t remote_did)
2404 {
2405         srb_t *sp;
2406         fc_port_t *fcport = NULL;
2407         struct srb_iocb *elsio = NULL;
2408         struct qla_hw_data *ha = vha->hw;
2409         struct els_logo_payload logo_pyld;
2410         int rval = QLA_SUCCESS;
2411
2412         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2413         if (!fcport) {
2414                 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2415                 return -ENOMEM;
2416         }
2417
2418         /* Alloc SRB structure */
2419         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2420         if (!sp) {
2421                 kfree(fcport);
2422                 ql_log(ql_log_info, vha, 0x70e6,
2423                     "SRB allocation failed\n");
2424                 return -ENOMEM;
2425         }
2426
2427         elsio = &sp->u.iocb_cmd;
2428         fcport->loop_id = 0xFFFF;
2429         fcport->d_id.b.domain = remote_did.b.domain;
2430         fcport->d_id.b.area = remote_did.b.area;
2431         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2432
2433         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x\n",
2434             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2435
2436         sp->type = SRB_ELS_DCMD;
2437         sp->name = "ELS_DCMD";
2438         sp->fcport = fcport;
2439         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2440         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2441         sp->done = qla2x00_els_dcmd_sp_done;
2442         sp->free = qla2x00_els_dcmd_sp_free;
2443
2444         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2445                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2446                             GFP_KERNEL);
2447
2448         if (!elsio->u.els_logo.els_logo_pyld) {
2449                 sp->free(sp);
2450                 return QLA_FUNCTION_FAILED;
2451         }
2452
2453         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2454
2455         elsio->u.els_logo.els_cmd = els_opcode;
2456         logo_pyld.opcode = els_opcode;
2457         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2458         logo_pyld.s_id[1] = vha->d_id.b.area;
2459         logo_pyld.s_id[2] = vha->d_id.b.domain;
2460         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2461         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2462
2463         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2464             sizeof(struct els_logo_payload));
2465
2466         rval = qla2x00_start_sp(sp);
2467         if (rval != QLA_SUCCESS) {
2468                 sp->free(sp);
2469                 return QLA_FUNCTION_FAILED;
2470         }
2471
2472         ql_dbg(ql_dbg_io, vha, 0x3074,
2473             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2474             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2475             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2476
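        /*
         * Synchronous by design: block until the ELS LOGO completes or
         * the timeout handler aborts the exchange and completes it.
         */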
2477         wait_for_completion(&elsio->u.els_logo.comp);
2478
2479         sp->free(sp);
2480         return rval;
2481 }
2482
2483 static void
2484 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2485 {
2486         scsi_qla_host_t *vha = sp->vha;
2487         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2488
2489         els_iocb->entry_type = ELS_IOCB_TYPE;
2490         els_iocb->entry_count = 1;
2491         els_iocb->sys_define = 0;
2492         els_iocb->entry_status = 0;
2493         els_iocb->handle = sp->handle;
2494         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2495         els_iocb->tx_dsd_count = 1;
2496         els_iocb->vp_index = vha->vp_idx;
2497         els_iocb->sof_type = EST_SOFI3;
2498         els_iocb->rx_dsd_count = 0;
2499         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2500
2501         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2502         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2503         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2504         els_iocb->control_flags = 0;
2505
2506         els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2507         els_iocb->tx_address[0] =
2508             cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2509         els_iocb->tx_address[1] =
2510             cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2511         els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2512
2513         els_iocb->rx_byte_count = 0;
2514         els_iocb->rx_address[0] = 0;
2515         els_iocb->rx_address[1] = 0;
2516         els_iocb->rx_len = 0;
2517
2518         sp->vha->qla_stats.control_requests++;
2519 }
2520
2521 static void
2522 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2523 {
2524         struct bsg_job *bsg_job = sp->u.bsg_job;
2525         struct fc_bsg_request *bsg_request = bsg_job->request;
2526
2527         els_iocb->entry_type = ELS_IOCB_TYPE;
2528         els_iocb->entry_count = 1;
2529         els_iocb->sys_define = 0;
2530         els_iocb->entry_status = 0;
2531         els_iocb->handle = sp->handle;
2532         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2533         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2534         els_iocb->vp_index = sp->vha->vp_idx;
2535         els_iocb->sof_type = EST_SOFI3;
2536         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2537
2538         els_iocb->opcode =
2539             sp->type == SRB_ELS_CMD_RPT ?
2540             bsg_request->rqst_data.r_els.els_code :
2541             bsg_request->rqst_data.h_els.command_code;
2542         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2543         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2544         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2545         els_iocb->control_flags = 0;
2546         els_iocb->rx_byte_count =
2547             cpu_to_le32(bsg_job->reply_payload.payload_len);
2548         els_iocb->tx_byte_count =
2549             cpu_to_le32(bsg_job->request_payload.payload_len);
2550
2551         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2552             (bsg_job->request_payload.sg_list)));
2553         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2554             (bsg_job->request_payload.sg_list)));
2555         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2556             (bsg_job->request_payload.sg_list));
2557
2558         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2559             (bsg_job->reply_payload.sg_list)));
2560         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2561             (bsg_job->reply_payload.sg_list)));
2562         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2563             (bsg_job->reply_payload.sg_list));
2564
2565         sp->vha->qla_stats.control_requests++;
2566 }
2567
2568 static void
2569 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2570 {
2571         uint16_t        avail_dsds;
2572         uint32_t        *cur_dsd;
2573         struct scatterlist *sg;
2574         int index;
2575         uint16_t tot_dsds;
2576         scsi_qla_host_t *vha = sp->vha;
2577         struct qla_hw_data *ha = vha->hw;
2578         struct bsg_job *bsg_job = sp->u.bsg_job;
2579         int loop_iteration = 0;
2580         int entry_count = 1;
2581
2582         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2583         ct_iocb->entry_type = CT_IOCB_TYPE;
2584         ct_iocb->entry_status = 0;
2585         ct_iocb->handle1 = sp->handle;
2586         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2587         ct_iocb->status = cpu_to_le16(0);
2588         ct_iocb->control_flags = cpu_to_le16(0);
2589         ct_iocb->timeout = 0;
2590         ct_iocb->cmd_dsd_count =
2591             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2592         ct_iocb->total_dsd_count =
2593             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2594         ct_iocb->req_bytecount =
2595             cpu_to_le32(bsg_job->request_payload.payload_len);
2596         ct_iocb->rsp_bytecount =
2597             cpu_to_le32(bsg_job->reply_payload.payload_len);
2598
2599         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2600             (bsg_job->request_payload.sg_list)));
2601         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2602             (bsg_job->request_payload.sg_list)));
2603         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2604
2605         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2606             (bsg_job->reply_payload.sg_list)));
2607         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2608             (bsg_job->reply_payload.sg_list)));
2609         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2610
2611         avail_dsds = 1;
2612         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2613         index = 0;
2614         tot_dsds = bsg_job->reply_payload.sg_cnt;
2615
2616         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2617                 dma_addr_t       sle_dma;
2618                 cont_a64_entry_t *cont_pkt;
2619
2620                 /* Allocate additional continuation packets? */
2621                 if (avail_dsds == 0) {
2622                         /*
2623                          * Five DSDs are available in the Cont.
2624                          * Type 1 IOCB.
2625                          */
2626                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2627                             vha->hw->req_q_map[0]);
2628                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2629                         avail_dsds = 5;
2630                         entry_count++;
2631                 }
2632
2633                 sle_dma = sg_dma_address(sg);
2634                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2635                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2636                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2637                 loop_iteration++;
2638                 avail_dsds--;
2639         }
2640         ct_iocb->entry_count = entry_count;
2641
2642         sp->vha->qla_stats.control_requests++;
2643 }
2644
2645 static void
2646 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2647 {
2648         uint16_t        avail_dsds;
2649         uint32_t        *cur_dsd;
2650         struct scatterlist *sg;
2651         int index;
2652         uint16_t tot_dsds;
2653         scsi_qla_host_t *vha = sp->vha;
2654         struct qla_hw_data *ha = vha->hw;
2655         struct bsg_job *bsg_job = sp->u.bsg_job;
2656         int loop_iteration = 0;
2657         int entry_count = 1;
2658
2659         ct_iocb->entry_type = CT_IOCB_TYPE;
2660         ct_iocb->entry_status = 0;
2661         ct_iocb->sys_define = 0;
2662         ct_iocb->handle = sp->handle;
2663
2664         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2665         ct_iocb->vp_index = sp->vha->vp_idx;
2666         ct_iocb->comp_status = cpu_to_le16(0);
2667
2668         ct_iocb->cmd_dsd_count =
2669                 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2670         ct_iocb->timeout = 0;
2671         ct_iocb->rsp_dsd_count =
2672                 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2673         ct_iocb->rsp_byte_count =
2674             cpu_to_le32(bsg_job->reply_payload.payload_len);
2675         ct_iocb->cmd_byte_count =
2676             cpu_to_le32(bsg_job->request_payload.payload_len);
2677         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2678             (bsg_job->request_payload.sg_list)));
2679         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2680            (bsg_job->request_payload.sg_list)));
2681         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2682             (bsg_job->request_payload.sg_list));
2683
2684         avail_dsds = 1;
2685         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2686         index = 0;
2687         tot_dsds = bsg_job->reply_payload.sg_cnt;
2688
2689         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2690                 dma_addr_t       sle_dma;
2691                 cont_a64_entry_t *cont_pkt;
2692
2693                 /* Allocate additional continuation packets? */
2694                 if (avail_dsds == 0) {
2695                         /*
2696                          * Five DSDs are available in the Cont.
2697                          * Type 1 IOCB.
2698                          */
2699                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2700                             ha->req_q_map[0]);
2701                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2702                         avail_dsds = 5;
2703                         entry_count++;
2704                 }
2705
2706                 sle_dma = sg_dma_address(sg);
2707                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2708                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2709                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2710                 loop_iteration++;
2711                 avail_dsds--;
2712         }
2713         ct_iocb->entry_count = entry_count;
2714 }
2715
2716 /**
2717  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2718  * @sp: command to send to the ISP
2719  *
2720  * Returns non-zero if a failure occurred, else zero.
2721  */
2722 int
2723 qla82xx_start_scsi(srb_t *sp)
2724 {
2725         int             nseg;
2726         unsigned long   flags;
2727         struct scsi_cmnd *cmd;
2728         uint32_t        *clr_ptr;
2729         uint32_t        index;
2730         uint32_t        handle;
2731         uint16_t        cnt;
2732         uint16_t        req_cnt;
2733         uint16_t        tot_dsds;
2734         struct device_reg_82xx __iomem *reg;
2735         uint32_t dbval;
2736         uint32_t *fcp_dl;
2737         uint8_t additional_cdb_len;
2738         struct ct6_dsd *ctx;
2739         struct scsi_qla_host *vha = sp->vha;
2740         struct qla_hw_data *ha = vha->hw;
2741         struct req_que *req = NULL;
2742         struct rsp_que *rsp = NULL;
2743
2744         /* Setup device pointers. */
2745         reg = &ha->iobase->isp82;
2746         cmd = GET_CMD_SP(sp);
2747         req = vha->req;
2748         rsp = ha->rsp_q_map[0];
2749
2750         /* So we know we haven't pci_map'ed anything yet */
2751         tot_dsds = 0;
2752
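        /*
         * ISP82xx doorbell value: request-queue-update code (0x4) with
         * the adapter port number encoded in bits 5 and up.
         */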
2753         dbval = 0x04 | (ha->portnum << 5);
2754
2755         /* Send marker if required */
2756         if (vha->marker_needed != 0) {
2757                 if (qla2x00_marker(vha, req,
2758                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2759                         ql_log(ql_log_warn, vha, 0x300c,
2760                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2761                         return QLA_FUNCTION_FAILED;
2762                 }
2763                 vha->marker_needed = 0;
2764         }
2765
2766         /* Acquire ring specific lock */
2767         spin_lock_irqsave(&ha->hardware_lock, flags);
2768
2769         /* Check for room in outstanding command list. */
2770         handle = req->current_outstanding_cmd;
2771         for (index = 1; index < req->num_outstanding_cmds; index++) {
2772                 handle++;
2773                 if (handle == req->num_outstanding_cmds)
2774                         handle = 1;
2775                 if (!req->outstanding_cmds[handle])
2776                         break;
2777         }
2778         if (index == req->num_outstanding_cmds)
2779                 goto queuing_error;
2780
2781         /* Map the sg table so we have an accurate count of sg entries needed */
2782         if (scsi_sg_count(cmd)) {
2783                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2784                     scsi_sg_count(cmd), cmd->sc_data_direction);
2785                 if (unlikely(!nseg))
2786                         goto queuing_error;
2787         } else
2788                 nseg = 0;
2789
2790         tot_dsds = nseg;
2791
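        /*
         * Past the ql2xshiftctondsd threshold, switch to a Command
         * Type 6 IOCB whose data segments live in externally chained
         * DSD lists drawn from a per-adapter pool (replenished here,
         * under the hardware lock, when it runs short).
         */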
2792         if (tot_dsds > ql2xshiftctondsd) {
2793                 struct cmd_type_6 *cmd_pkt;
2794                 uint16_t more_dsd_lists = 0;
2795                 struct dsd_dma *dsd_ptr;
2796                 uint16_t i;
2797
2798                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2799                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2800                         ql_dbg(ql_dbg_io, vha, 0x300d,
2801                             "Num of DSD list %d is more than %d for cmd=%p.\n",
2802                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2803                             cmd);
2804                         goto queuing_error;
2805                 }
2806
2807                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2808                         goto sufficient_dsds;
2809                 else
2810                         more_dsd_lists -= ha->gbl_dsd_avail;
2811
2812                 for (i = 0; i < more_dsd_lists; i++) {
2813                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2814                         if (!dsd_ptr) {
2815                                 ql_log(ql_log_fatal, vha, 0x300e,
2816                                     "Failed to allocate memory for dsd_dma "
2817                                     "for cmd=%p.\n", cmd);
2818                                 goto queuing_error;
2819                         }
2820
2821                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2822                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2823                         if (!dsd_ptr->dsd_addr) {
2824                                 kfree(dsd_ptr);
2825                                 ql_log(ql_log_fatal, vha, 0x300f,
2826                                     "Failed to allocate memory for dsd_addr "
2827                                     "for cmd=%p.\n", cmd);
2828                                 goto queuing_error;
2829                         }
2830                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2831                         ha->gbl_dsd_avail++;
2832                 }
2833
2834 sufficient_dsds:
2835                 req_cnt = 1;
2836
2837                 if (req->cnt < (req_cnt + 2)) {
2838                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2839                                 &reg->req_q_out[0]);
2840                         if (req->ring_index < cnt)
2841                                 req->cnt = cnt - req->ring_index;
2842                         else
2843                                 req->cnt = req->length -
2844                                         (req->ring_index - cnt);
2845                         if (req->cnt < (req_cnt + 2))
2846                                 goto queuing_error;
2847                 }
2848
2849                 ctx = sp->u.scmd.ctx =
2850                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2851                 if (!ctx) {
2852                         ql_log(ql_log_fatal, vha, 0x3010,
2853                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2854                         goto queuing_error;
2855                 }
2856
2857                 memset(ctx, 0, sizeof(struct ct6_dsd));
2858                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2859                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2860                 if (!ctx->fcp_cmnd) {
2861                         ql_log(ql_log_fatal, vha, 0x3011,
2862                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2863                         goto queuing_error;
2864                 }
2865
2866                 /* Initialize the DSD list and dma handle */
2867                 INIT_LIST_HEAD(&ctx->dsd_list);
2868                 ctx->dsd_use_cnt = 0;
2869
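                /*
                 * The FCP_CMND IU built below is 12 bytes of fixed header
                 * (8-byte LUN plus CRN/task attribute/task management/
                 * additional_cdb_len fields), then the CDB, then a 4-byte
                 * FCP_DL (data length) field -- hence the 12 + len + 4
                 * arithmetic on fcp_cmnd_len.
                 */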
2870                 if (cmd->cmd_len > 16) {
2871                         additional_cdb_len = cmd->cmd_len - 16;
2872                         if ((cmd->cmd_len % 4) != 0) {
2873                                 /* SCSI commands longer than 16 bytes
2874                                  * must be a multiple of 4 bytes.
2875                                  */
2876                                 ql_log(ql_log_warn, vha, 0x3012,
2877                                     "scsi cmd len %d not multiple of 4 "
2878                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2879                                 goto queuing_error_fcp_cmnd;
2880                         }
2881                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2882                 } else {
2883                         additional_cdb_len = 0;
2884                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2885                 }
2886
2887                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2888                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2889
2890                 /* Zero out remaining portion of packet. */
2891                 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
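                /* (uint32_t *)cmd_pkt + 2 skips the first 8 bytes -- the
                 * 4-byte entry header and the handle written just above --
                 * so only the remaining REQUEST_ENTRY_SIZE - 8 bytes of
                 * the entry are cleared. */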
2892                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2893                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2894                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2895
2896                 /* Set NPORT-ID and LUN number */
2897                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2898                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2899                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2900                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2901                 cmd_pkt->vp_index = sp->vha->vp_idx;
2902
2903                 /* Build IOCB segments */
2904                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2905                         goto queuing_error_fcp_cmnd;
2906
2907                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2908                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2909
2910                 /* build FCP_CMND IU */
2911                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2912                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2913                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2914
2915                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2916                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2917                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2918                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2919
2920                 /* Populate the FCP_PRIO. */
2921                 if (ha->flags.fcp_prio_enabled)
2922                         ctx->fcp_cmnd->task_attribute |=
2923                             sp->fcport->fcp_prio << 3;
2924
2925                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2926
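                /* Per FCP, FCP_DL is big-endian and sits right after the
                 * (possibly extended) CDB -- hence htonl() here instead of
                 * the cpu_to_le32() used for IOCB fields. */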
2927                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2928                     additional_cdb_len);
2929                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2930
2931                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2932                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2933                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2934                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2935                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2936
2937                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2938                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2939                 /* Set total IOCB entry count. */
2940                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2941                 /* Specify response queue number where
2942                  * completion should happen.
2943                  */
2944                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2945         } else {
2946                 struct cmd_type_7 *cmd_pkt;
2947                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2948                 if (req->cnt < (req_cnt + 2)) {
2949                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2950                             &reg->req_q_out[0]);
2951                         if (req->ring_index < cnt)
2952                                 req->cnt = cnt - req->ring_index;
2953                         else
2954                                 req->cnt = req->length -
2955                                         (req->ring_index - cnt);
2956                 }
2957                 if (req->cnt < (req_cnt + 2))
2958                         goto queuing_error;
2959
2960                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2961                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2962
2963                 /* Zero out remaining portion of packet. */
2964                 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2965                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2966                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2967                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2968
2969                 /* Set NPORT-ID and LUN number */
2970                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2971                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2972                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2973                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2974                 cmd_pkt->vp_index = sp->vha->vp_idx;
2975
2976                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2977                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2978                     sizeof(cmd_pkt->lun));
2979
2980                 /* Populate the FCP_PRIO. */
2981                 if (ha->flags.fcp_prio_enabled)
2982                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2983
2984                 /* Load SCSI command packet. */
2985                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2986                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2987
2988                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2989
2990                 /* Build IOCB segments */
2991                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2992
2993                 /* Set total IOCB entry count. */
2994                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2995                 /* Specify response queue number where
2996                  * completion should happen.
2997                  */
2998                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2999
3000         }
3001         /* Build command packet. */
3002         req->current_outstanding_cmd = handle;
3003         req->outstanding_cmds[handle] = sp;
3004         sp->handle = handle;
3005         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3006         req->cnt -= req_cnt;
3007         wmb();
3008
3009         /* Adjust ring index. */
3010         req->ring_index++;
3011         if (req->ring_index == req->length) {
3012                 req->ring_index = 0;
3013                 req->ring_ptr = req->ring;
3014         } else
3015                 req->ring_ptr++;
3016
3017         sp->flags |= SRB_DMA_VALID;
3018
3019         /* Set chip new ring index. */
3020         /* write, read and verify logic */
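        /* The ISP82xx doorbell write may not stick on the first attempt;
         * the read-back loop below keeps re-posting dbval until the chip
         * echoes it back. */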
3021         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3022         if (ql2xdbwr)
3023                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3024         else {
3025                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3026                 wmb();
3027                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3028                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3029                         wmb();
3030                 }
3031         }
3032
3033         /* Manage unprocessed RIO/ZIO commands in response queue. */
3034         if (vha->flags.process_response_queue &&
3035             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3036                 qla24xx_process_response_queue(vha, rsp);
3037
3038         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3039         return QLA_SUCCESS;
3040
3041 queuing_error_fcp_cmnd:
3042         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3043 queuing_error:
3044         if (tot_dsds)
3045                 scsi_dma_unmap(cmd);
3046
3047         if (sp->u.scmd.ctx) {
3048                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3049                 sp->u.scmd.ctx = NULL;
3050         }
3051         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3052
3053         return QLA_FUNCTION_FAILED;
3054 }
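
/*
 * Editorial sketch (not part of the driver): the request-ring free-space
 * recomputation performed twice above is a producer/consumer distance on
 * a circular ring -- ring_index is the driver's producer index, the value
 * read from req_q_out is the firmware's consumer index.
 */
#if 0   /* illustration only */
static inline uint16_t qla_req_ring_space(uint16_t out, uint16_t in,
        uint16_t length)
{
        /* Entries the driver may still fill before catching the firmware. */
        return (in < out) ? out - in : length - (in - out);
}
#endif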
3055
3056 static void
3057 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3058 {
3059         struct srb_iocb *aio = &sp->u.iocb_cmd;
3060         scsi_qla_host_t *vha = sp->vha;
3061         struct req_que *req = vha->req;
3062
3063         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3064         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3065         abt_iocb->entry_count = 1;
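        /* Both handles carry the owning request-queue id in their upper
         * 16 bits via MAKE_HANDLE(): "handle" names this abort IOCB itself,
         * "handle_to_abort" names the outstanding command to be aborted. */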
3066         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3067         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3068         abt_iocb->handle_to_abort =
3069             cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3070         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3071         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3072         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3073         abt_iocb->vp_index = vha->vp_idx;
3074         abt_iocb->req_que_no = cpu_to_le16(req->id);
3075         /* Send the command to the firmware */
3076         wmb();
3077 }
3078
3079 static void
3080 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3081 {
3082         int i, sz;
3083
3084         mbx->entry_type = MBX_IOCB_TYPE;
3085         mbx->handle = sp->handle;
3086         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
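        /* sz is clamped to the smaller array so a size mismatch between the
         * IOCB's mb[] and the srb's out_mb[] cannot overrun either one. */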
3087
3088         for (i = 0; i < sz; i++)
3089                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3090 }
3091
3092 static void
3093 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3094 {
3095         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3096         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3097         ct_pkt->handle = sp->handle;
3098 }
3099
3100 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3101         struct nack_to_isp *nack)
3102 {
3103         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3104
3105         nack->entry_type = NOTIFY_ACK_TYPE;
3106         nack->entry_count = 1;
3107         nack->ox_id = ntfy->ox_id;
3108
3109         nack->u.isp24.handle = sp->handle;
3110         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3111         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3112                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3113                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3114         }
3115         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3116         nack->u.isp24.status = ntfy->u.isp24.status;
3117         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3118         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3119         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3120         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3121         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3122         nack->u.isp24.srr_flags = 0;
3123         nack->u.isp24.srr_reject_code = 0;
3124         nack->u.isp24.srr_reject_code_expl = 0;
3125         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3126 }
3127
3128 int
3129 qla2x00_start_sp(srb_t *sp)
3130 {
3131         int rval;
3132         scsi_qla_host_t *vha = sp->vha;
3133         struct qla_hw_data *ha = vha->hw;
3134         void *pkt;
3135         unsigned long flags;
3136
3137         rval = QLA_FUNCTION_FAILED;
3138         spin_lock_irqsave(&ha->hardware_lock, flags);
3139         pkt = qla2x00_alloc_iocbs(vha, sp);
3140         if (!pkt) {
3141                 ql_log(ql_log_warn, vha, 0x700c,
3142                     "qla2x00_alloc_iocbs failed.\n");
3143                 goto done;
3144         }
3145
3146         rval = QLA_SUCCESS;
3147         switch (sp->type) {
3148         case SRB_LOGIN_CMD:
3149                 IS_FWI2_CAPABLE(ha) ?
3150                     qla24xx_login_iocb(sp, pkt) :
3151                     qla2x00_login_iocb(sp, pkt);
3152                 break;
3153         case SRB_LOGOUT_CMD:
3154                 IS_FWI2_CAPABLE(ha) ?
3155                     qla24xx_logout_iocb(sp, pkt) :
3156                     qla2x00_logout_iocb(sp, pkt);
3157                 break;
3158         case SRB_ELS_CMD_RPT:
3159         case SRB_ELS_CMD_HST:
3160                 qla24xx_els_iocb(sp, pkt);
3161                 break;
3162         case SRB_CT_CMD:
3163                 IS_FWI2_CAPABLE(ha) ?
3164                     qla24xx_ct_iocb(sp, pkt) :
3165                     qla2x00_ct_iocb(sp, pkt);
3166                 break;
3167         case SRB_ADISC_CMD:
3168                 IS_FWI2_CAPABLE(ha) ?
3169                     qla24xx_adisc_iocb(sp, pkt) :
3170                     qla2x00_adisc_iocb(sp, pkt);
3171                 break;
3172         case SRB_TM_CMD:
3173                 IS_QLAFX00(ha) ?
3174                     qlafx00_tm_iocb(sp, pkt) :
3175                     qla24xx_tm_iocb(sp, pkt);
3176                 break;
3177         case SRB_FXIOCB_DCMD:
3178         case SRB_FXIOCB_BCMD:
3179                 qlafx00_fxdisc_iocb(sp, pkt);
3180                 break;
3181         case SRB_ABT_CMD:
3182                 IS_QLAFX00(ha) ?
3183                         qlafx00_abort_iocb(sp, pkt) :
3184                         qla24xx_abort_iocb(sp, pkt);
3185                 break;
3186         case SRB_ELS_DCMD:
3187                 qla24xx_els_logo_iocb(sp, pkt);
3188                 break;
3189         case SRB_CT_PTHRU_CMD:
3190                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3191                 break;
3192         case SRB_MB_IOCB:
3193                 qla2x00_mb_iocb(sp, pkt);
3194                 break;
3195         case SRB_NACK_PLOGI:
3196         case SRB_NACK_PRLI:
3197         case SRB_NACK_LOGO:
3198                 qla2x00_send_notify_ack_iocb(sp, pkt);
3199                 break;
3200         default:
3201                 break;
3202         }
3203
3204         wmb();
3205         qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3206 done:
3207         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3208         return rval;
3209 }
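
/*
 * Editorial sketch (not part of the driver): a minimal caller of
 * qla2x00_start_sp(). qla2x00_get_sp() and sp->free() are the driver's
 * real srb helpers; real callers also set up sp->done, the iocb timeout
 * and a timer before dispatching. Error handling here is illustrative.
 */
#if 0   /* illustration only */
static int example_async_logout(scsi_qla_host_t *vha, fc_port_t *fcport)
{
        srb_t *sp;
        int rval;

        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                return QLA_FUNCTION_FAILED;

        sp->type = SRB_LOGOUT_CMD;
        sp->name = "example_logout";

        rval = qla2x00_start_sp(sp);    /* queues the IOCB built above */
        if (rval != QLA_SUCCESS)
                sp->free(sp);
        return rval;
}
#endif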
3210
3211 static void
3212 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3213                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3214 {
3215         uint16_t avail_dsds;
3216         uint32_t *cur_dsd;
3217         uint32_t req_data_len = 0;
3218         uint32_t rsp_data_len = 0;
3219         struct scatterlist *sg;
3220         int index;
3221         int entry_count = 1;
3222         struct bsg_job *bsg_job = sp->u.bsg_job;
3223
3224         /* Update entry type to indicate a bidir command */
3225         *((uint32_t *)(&cmd_pkt->entry_type)) =
3226                 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3227
3228         /* Set the transfer direction; in this case set both flags.
3229          * Also set the BD_WRAP_BACK flag; the firmware takes care of
3230          * assigning DID=SID for outgoing pkts.
3231          */
3232         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3233         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3234         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3235                                                         BD_WRAP_BACK);
3236
3237         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3238         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3239         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3240         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3241
3242         vha->bidi_stats.transfer_bytes += req_data_len;
3243         vha->bidi_stats.io_count++;
3244
3245         vha->qla_stats.output_bytes += req_data_len;
3246         vha->qla_stats.output_requests++;
3247
3248         /* Only one DSD is available in the bidirectional IOCB; the
3249          * remaining DSDs are bundled into continuation IOCBs.
3250          */
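        /* Each DSD is three 32-bit words (address low/high plus length),
         * so a 64-byte Continuation Type 1 entry holds its 4-byte header
         * plus five 12-byte DSDs -- the avail_dsds = 5 used below. */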
3251         avail_dsds = 1;
3252         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3253
3254         index = 0;
3255
3256         for_each_sg(bsg_job->request_payload.sg_list, sg,
3257                                 bsg_job->request_payload.sg_cnt, index) {
3258                 dma_addr_t sle_dma;
3259                 cont_a64_entry_t *cont_pkt;
3260
3261                 /* Allocate additional continuation packets */
3262                 if (avail_dsds == 0) {
3263                         /* Continuation type 1 IOCB can accommodate
3264                          * 5 DSDS
3265                          */
3266                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3267                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3268                         avail_dsds = 5;
3269                         entry_count++;
3270                 }
3271                 sle_dma = sg_dma_address(sg);
3272                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3273                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3274                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3275                 avail_dsds--;
3276         }
3277         /* For a read request the DSDs always go to a continuation IOCB,
3278          * following the write DSDs. If there is room on the current IOCB
3279          * they are added to it; otherwise a new continuation IOCB is
3280          * allocated.
3281          */
3282         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3283                                 bsg_job->reply_payload.sg_cnt, index) {
3284                 dma_addr_t sle_dma;
3285                 cont_a64_entry_t *cont_pkt;
3286
3287                 /* Allocate additional continuation packets */
3288                 if (avail_dsds == 0) {
3289                         /* Continuation type 1 IOCB can accommodate
3290                          * 5 DSDS
3291                          */
3292                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3293                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3294                         avail_dsds = 5;
3295                         entry_count++;
3296                 }
3297                 sle_dma = sg_dma_address(sg);
3298                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3299                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3300                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3301                 avail_dsds--;
3302         }
3303         /* This value should be the same as the number of IOCBs required for this cmd */
3304         cmd_pkt->entry_count = entry_count;
3305 }
3306
3307 int
3308 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3309 {
3310
3311         struct qla_hw_data *ha = vha->hw;
3312         unsigned long flags;
3313         uint32_t handle;
3314         uint32_t index;
3315         uint16_t req_cnt;
3316         uint16_t cnt;
3317         uint32_t *clr_ptr;
3318         struct cmd_bidir *cmd_pkt = NULL;
3319         struct rsp_que *rsp;
3320         struct req_que *req;
3321         int rval = EXT_STATUS_OK;
3322
3325         rsp = ha->rsp_q_map[0];
3326         req = vha->req;
3327
3328         /* Send marker if required */
3329         if (vha->marker_needed != 0) {
3330                 if (qla2x00_marker(vha, req,
3331                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3332                         return EXT_STATUS_MAILBOX;
3333                 vha->marker_needed = 0;
3334         }
3335
3336         /* Acquire ring specific lock */
3337         spin_lock_irqsave(&ha->hardware_lock, flags);
3338
3339         /* Check for room in outstanding command list. */
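        /* Handle 0 is reserved; the scan starts at 1 and wraps around,
         * looking for a free slot in outstanding_cmds[]. */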
3340         handle = req->current_outstanding_cmd;
3341         for (index = 1; index < req->num_outstanding_cmds; index++) {
3342                 handle++;
3343                 if (handle == req->num_outstanding_cmds)
3344                         handle = 1;
3345                 if (!req->outstanding_cmds[handle])
3346                         break;
3347         }
3348
3349         if (index == req->num_outstanding_cmds) {
3350                 rval = EXT_STATUS_BUSY;
3351                 goto queuing_error;
3352         }
3353
3354         /* Calculate number of IOCB required */
3355         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3356
3357         /* Check for room on request queue. */
3358         if (req->cnt < req_cnt + 2) {
3359                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3360                     RD_REG_DWORD_RELAXED(req->req_q_out);
3361                 if  (req->ring_index < cnt)
3362                         req->cnt = cnt - req->ring_index;
3363                 else
3364                         req->cnt = req->length -
3365                                 (req->ring_index - cnt);
3366         }
3367         if (req->cnt < req_cnt + 2) {
3368                 rval = EXT_STATUS_BUSY;
3369                 goto queuing_error;
3370         }
3371
3372         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3373         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3374
3375         /* Zero out remaining portion of packet. */
3376         /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3377         clr_ptr = (uint32_t *)cmd_pkt + 2;
3378         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3379
3380         /* Set NPORT-ID (of vha) */
3381         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3382         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3383         cmd_pkt->port_id[1] = vha->d_id.b.area;
3384         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3385
3386         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3387         cmd_pkt->entry_status = (uint8_t) rsp->id;
3388         /* Build command packet. */
3389         req->current_outstanding_cmd = handle;
3390         req->outstanding_cmds[handle] = sp;
3391         sp->handle = handle;
3392         req->cnt -= req_cnt;
3393
3394         /* Send the command to the firmware */
3395         wmb();
3396         qla2x00_start_iocbs(vha, req);
3397 queuing_error:
3398         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3399         return rval;
3400 }