/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

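/*
 * Worked example (illustrative, not driver code): for a WRITE(10) the
 * midlayer sets cmd->sc_data_direction = DMA_TO_DEVICE, so the helper
 * above returns CF_WRITE and scsi_bufflen(cmd) is accounted against
 * vha->qla_stats.output_bytes.
 */
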
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

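/*
 * Worked example: a command mapped to 12 data segments needs
 * qla2x00_calc_iocbs_32(12) = 1 + (12 - 3) / 7 + 1 = 3 entries: one
 * Command Type 2 IOCB carrying 3 DSDs plus two Continuation Type 0
 * IOCBs carrying 7 and 2 DSDs respectively.
 */
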
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

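/*
 * Worked example: the 64-bit command IOCB holds only 2 DSDs, so the
 * same 12-segment command needs qla2x00_calc_iocbs_64(12) =
 * 1 + (12 - 2) / 5 = 3 entries: one Command Type 3 IOCB carrying
 * 2 DSDs plus two Continuation Type 1 IOCBs carrying 5 DSDs each.
 */
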
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

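/*
 * Example of the ring-index arithmetic above (illustrative): on a
 * 2048-entry request ring, advancing from ring_index 2046 just bumps
 * ring_ptr to the last entry; the next advance hits
 * ring_index == req->length, so the index wraps to 0 and ring_ptr is
 * reset to the base of the ring.
 */
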
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

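/*
 * Example (illustrative): SCSI_PROT_WRITE_INSERT means the host sends
 * unprotected data and protection is generated on the way to the
 * target, so it maps to PO_MODE_DIF_INSERT above; PASS operations on
 * a host using the IP-checksum guard (SHOST_DIX_GUARD_IP) map to
 * PO_MODE_DIF_TCP_CKSUM instead of plain PO_MODE_DIF_PASS.
 */
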
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        } else {
                cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

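/*
 * Worked example (assuming QLA_DSDS_PER_IOCB is 37, its value in
 * qla_def.h at the time of writing): a 40-segment transfer needs
 * qla24xx_calc_dsd_lists(40) = 40 / 37 + 1 = 2 DSD lists.
 */
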
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

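/*
 * The masks select which bytes of the incoming tags the firmware
 * validates or replaces: e.g. setting all four ref_tag_mask bytes to
 * 0xff checks the full 32-bit reference tag, while an all-zero
 * app_tag_mask ignores the application tag entirely (see
 * qla24xx_set_t10dif_tags() below).
 */
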
/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI
 * command and fill in the firmware DIF context accordingly.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

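/*
 * Example (illustrative): a DIF Type 1 READ starting at LBA 0x12345678
 * gets pkt->ref_tag = 0x12345678 (the low 32 bits of the LBA), a zero
 * app tag that is never checked (app_tag_mask == 0), and, if HBA error
 * checking is enabled, an all-ones ref_tag_mask so every byte of the
 * reference tag is verified.
 */
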
struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping; zero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

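/*
 * Minimal usage sketch for the block-carving iterator below
 * (illustrative only; this mirrors how
 * qla24xx_walk_and_build_sglist_no_difb() drives it):
 *
 *      struct qla2_sgx sgx;
 *      uint32_t partial;
 *
 *      memset(&sgx, 0, sizeof(sgx));
 *      sgx.tot_bytes = scsi_bufflen(cmd);
 *      sgx.cur_sg = scsi_sglist(cmd);
 *      while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial)) {
 *              // consume sgx.dma_addr / sgx.dma_len here;
 *              // partial == 0 marks a protection-interval boundary
 *      }
 */
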
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

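/*
 * Worked example: with a 512-byte protection interval and an SG list of
 * 300 + 724 bytes, the first call returns the 300-byte run with
 * *partial = 1, the second returns the next 212 bytes with *partial = 0
 * (a full interval has now been consumed), and later calls carve
 * 512-byte pieces out of the remainder.
 */
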
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;
        struct scsi_qla_host *vha;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                vha = sp->fcport->vha;
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                vha = tc->vha;
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

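/*
 * Note on the DSD-list chaining used above and in the two walkers
 * below: each inline DSD is 12 bytes (32-bit LSD/MSD address plus a
 * length word), so a list sized (avail_dsds + 1) * 12 leaves one spare
 * 12-byte slot that either receives the pointer to the next chained
 * list or holds the three-word null terminator written at the end.
 */
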
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                sgl = tc->sg;
                vha = tc->vha;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Firmware protection options (see qla24xx_configure_prot_mode())
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        int                     sgc;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = GET_CMD_SP(sp);

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                    break;
                case ORDERED_QUEUE_TAG:
                    fcp_cmnd->task_attribute = TSK_ORDERED;
                    break;
                default:
                    fcp_cmnd->task_attribute = TSK_SIMPLE;
                    break;
                }
        } else {
                fcp_cmnd->task_attribute = TSK_SIMPLE;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute DIF length and adjust data length to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure DIF bundling if we need to fetch data and
                 * protection with interleaving PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walk data segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walk DIF segments */
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

1476 /**
1477  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1478  * @sp: command to send to the ISP
1479  *
1480  * Returns non-zero if a failure occurred, else zero.
1481  */
1482 int
1483 qla24xx_start_scsi(srb_t *sp)
1484 {
1485         int             ret, nseg;
1486         unsigned long   flags;
1487         uint32_t        *clr_ptr;
1488         uint32_t        index;
1489         uint32_t        handle;
1490         struct cmd_type_7 *cmd_pkt;
1491         uint16_t        cnt;
1492         uint16_t        req_cnt;
1493         uint16_t        tot_dsds;
1494         struct req_que *req = NULL;
1495         struct rsp_que *rsp = NULL;
1496         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1497         struct scsi_qla_host *vha = sp->fcport->vha;
1498         struct qla_hw_data *ha = vha->hw;
1499         char            tag[2];
1500
1501         /* Setup device pointers. */
1502         ret = 0;
1503
1504         qla25xx_set_que(sp, &rsp);
1505         req = vha->req;
1506
1507         /* So we know we haven't pci_map'ed anything yet */
1508         tot_dsds = 0;
1509
1510         /* Send marker if required */
1511         if (vha->marker_needed != 0) {
1512                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1513                     QLA_SUCCESS)
1514                         return QLA_FUNCTION_FAILED;
1515                 vha->marker_needed = 0;
1516         }
1517
1518         /* Acquire ring specific lock */
1519         spin_lock_irqsave(&ha->hardware_lock, flags);
1520
1521         /* Check for room in outstanding command list. */
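        /*
         * Handle 0 is reserved: the search below starts at the current
         * handle and wraps back to 1, never 0.
         */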
1522         handle = req->current_outstanding_cmd;
1523         for (index = 1; index < req->num_outstanding_cmds; index++) {
1524                 handle++;
1525                 if (handle == req->num_outstanding_cmds)
1526                         handle = 1;
1527                 if (!req->outstanding_cmds[handle])
1528                         break;
1529         }
1530         if (index == req->num_outstanding_cmds)
1531                 goto queuing_error;
1532
1533         /* Map the sg table so we have an accurate count of sg entries needed */
1534         if (scsi_sg_count(cmd)) {
1535                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1536                     scsi_sg_count(cmd), cmd->sc_data_direction);
1537                 if (unlikely(!nseg))
1538                         goto queuing_error;
1539         } else
1540                 nseg = 0;
1541
1542         tot_dsds = nseg;
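        /*
         * Recompute free request-ring space from the consumer index (a
         * shadow copy in host memory when supported, otherwise a register
         * read) and keep two entries of slack so the ring is never driven
         * completely full.
         */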
1543         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1544         if (req->cnt < (req_cnt + 2)) {
1545                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1546                     RD_REG_DWORD_RELAXED(req->req_q_out);
1547                 if (req->ring_index < cnt)
1548                         req->cnt = cnt - req->ring_index;
1549                 else
1550                         req->cnt = req->length -
1551                                 (req->ring_index - cnt);
1552                 if (req->cnt < (req_cnt + 2))
1553                         goto queuing_error;
1554         }
1555
1556         /* Build command packet. */
1557         req->current_outstanding_cmd = handle;
1558         req->outstanding_cmds[handle] = sp;
1559         sp->handle = handle;
1560         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1561         req->cnt -= req_cnt;
1562
1563         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1564         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1565
1566         /* Zero out the remaining portion of the packet. */
1567         /* The tagged queuing modifier defaults to TSK_SIMPLE (0). */
1568         clr_ptr = (uint32_t *)cmd_pkt + 2;
1569         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1570         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1571
1572         /* Set NPORT-ID and LUN number*/
1573         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1574         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1575         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1576         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1577         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1578
1579         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1580         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1581
1582         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1583         if (scsi_populate_tag_msg(cmd, tag)) {
1584                 switch (tag[0]) {
1585                 case HEAD_OF_QUEUE_TAG:
1586                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1587                         break;
1588                 case ORDERED_QUEUE_TAG:
1589                         cmd_pkt->task = TSK_ORDERED;
1590                         break;
1591                 default:
1592                         cmd_pkt->task = TSK_SIMPLE;
1593                         break;
1594                 }
1595         } else {
1596                 cmd_pkt->task = TSK_SIMPLE;
1597         }
1598
1599         /* Load SCSI command packet. */
1600         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1601         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1602
1603         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1604
1605         /* Build IOCB segments */
1606         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1607
1608         /* Set total data segment count. */
1609         cmd_pkt->entry_count = (uint8_t)req_cnt;
1610         /* Specify response queue number where completion should happen */
1611         cmd_pkt->entry_status = (uint8_t) rsp->id;
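        /* Ensure the IOCB is fully written before updating the ring index. */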
1612         wmb();
1613         /* Adjust ring index. */
1614         req->ring_index++;
1615         if (req->ring_index == req->length) {
1616                 req->ring_index = 0;
1617                 req->ring_ptr = req->ring;
1618         } else
1619                 req->ring_ptr++;
1620
1621         sp->flags |= SRB_DMA_VALID;
1622
1623         /* Set chip new ring index. */
1624         WRT_REG_DWORD(req->req_q_in, req->ring_index);
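        /* Read back to flush the posted PCI write of the ring index. */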
1625         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1626
1627         /* Manage unprocessed RIO/ZIO commands in response queue. */
1628         if (vha->flags.process_response_queue &&
1629                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1630                 qla24xx_process_response_queue(vha, rsp);
1631
1632         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1633         return QLA_SUCCESS;
1634
1635 queuing_error:
1636         if (tot_dsds)
1637                 scsi_dma_unmap(cmd);
1638
1639         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1640
1641         return QLA_FUNCTION_FAILED;
1642 }
1643
1644 /**
1645  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1646  * @sp: command to send to the ISP
1647  *
1648  * Returns non-zero if a failure occurred, else zero.
1649  */
1650 int
1651 qla24xx_dif_start_scsi(srb_t *sp)
1652 {
1653         int                     nseg;
1654         unsigned long           flags;
1655         uint32_t                *clr_ptr;
1656         uint32_t                index;
1657         uint32_t                handle;
1658         uint16_t                cnt;
1659         uint16_t                req_cnt = 0;
1660         uint16_t                tot_dsds;
1661         uint16_t                tot_prot_dsds;
1662         uint16_t                fw_prot_opts = 0;
1663         struct req_que          *req = NULL;
1664         struct rsp_que          *rsp = NULL;
1665         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1666         struct scsi_qla_host    *vha = sp->fcport->vha;
1667         struct qla_hw_data      *ha = vha->hw;
1668         struct cmd_type_crc_2   *cmd_pkt;
1669         uint32_t                status = 0;
1670
1671 #define QDSS_GOT_Q_SPACE        BIT_0
1672
1673         /* Only process protection-enabled I/O or CDBs longer than 16 bytes here. */
1674         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1675                 if (cmd->cmd_len <= 16)
1676                         return qla24xx_start_scsi(sp);
1677         }
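        /*
         * From here on the command either carries protection data or has a
         * CDB longer than 16 bytes; both need the Command Type CRC_2 format.
         */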
1678
1679         /* Setup device pointers. */
1680
1681         qla25xx_set_que(sp, &rsp);
1682         req = vha->req;
1683
1684         /* So we know we haven't pci_map'ed anything yet */
1685         tot_dsds = 0;
1686
1687         /* Send marker if required */
1688         if (vha->marker_needed != 0) {
1689                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1690                     QLA_SUCCESS)
1691                         return QLA_FUNCTION_FAILED;
1692                 vha->marker_needed = 0;
1693         }
1694
1695         /* Acquire ring specific lock */
1696         spin_lock_irqsave(&ha->hardware_lock, flags);
1697
1698         /* Check for room in outstanding command list. */
1699         handle = req->current_outstanding_cmd;
1700         for (index = 1; index < req->num_outstanding_cmds; index++) {
1701                 handle++;
1702                 if (handle == req->num_outstanding_cmds)
1703                         handle = 1;
1704                 if (!req->outstanding_cmds[handle])
1705                         break;
1706         }
1707
1708         if (index == req->num_outstanding_cmds)
1709                 goto queuing_error;
1710
1711         /* Compute number of required data segments */
1712         /* Map the sg table so we have an accurate count of sg entries needed */
1713         if (scsi_sg_count(cmd)) {
1714                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1715                     scsi_sg_count(cmd), cmd->sc_data_direction);
1716                 if (unlikely(!nseg))
1717                         goto queuing_error;
1718                 else
1719                         sp->flags |= SRB_DMA_VALID;
1720
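                /*
                 * When the HBA inserts or strips the protection data
                 * itself, the transfer must be described one logical block
                 * at a time, so recount the segments by walking the S/G
                 * list in sector-size chunks.
                 */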
1721                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1722                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1723                         struct qla2_sgx sgx;
1724                         uint32_t        partial;
1725
1726                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1727                         sgx.tot_bytes = scsi_bufflen(cmd);
1728                         sgx.cur_sg = scsi_sglist(cmd);
1729                         sgx.sp = sp;
1730
1731                         nseg = 0;
1732                         while (qla24xx_get_one_block_sg(
1733                             cmd->device->sector_size, &sgx, &partial))
1734                                 nseg++;
1735                 }
1736         } else
1737                 nseg = 0;
1738
1739         /* number of required data segments */
1740         tot_dsds = nseg;
1741
1742         /* Compute number of required protection segments */
1743         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1744                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1745                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1746                 if (unlikely(!nseg))
1747                         goto queuing_error;
1748                 else
1749                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1750
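                /*
                 * For insert/strip operations the protection stream is
                 * produced or consumed per logical block, so the segment
                 * count is the number of blocks rather than the S/G count.
                 */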
1751                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1752                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1753                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1754                 }
1755         } else {
1756                 nseg = 0;
1757         }
1758
1759         req_cnt = 1;
1760         /* Total Data and protection sg segment(s) */
1761         tot_prot_dsds = nseg;
1762         tot_dsds += nseg;
1763         if (req->cnt < (req_cnt + 2)) {
1764                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1765                     RD_REG_DWORD_RELAXED(req->req_q_out);
1766                 if (req->ring_index < cnt)
1767                         req->cnt = cnt - req->ring_index;
1768                 else
1769                         req->cnt = req->length -
1770                                 (req->ring_index - cnt);
1771                 if (req->cnt < (req_cnt + 2))
1772                         goto queuing_error;
1773         }
1774
1775         status |= QDSS_GOT_Q_SPACE;
1776
1777         /* Build header part of command packet (excluding the OPCODE). */
1778         req->current_outstanding_cmd = handle;
1779         req->outstanding_cmds[handle] = sp;
1780         sp->handle = handle;
1781         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1782         req->cnt -= req_cnt;
1783
1784         /* Fill-in common area */
1785         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1786         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1787
1788         clr_ptr = (uint32_t *)cmd_pkt + 2;
1789         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1790
1791         /* Set NPORT-ID and LUN number*/
1792         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1793         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1794         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1795         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1796
1797         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1798         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1799
1800         /* Total Data and protection segment(s) */
1801         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1802
1803         /* Build IOCB segments and adjust for data protection segments */
1804         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1805             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1806                 QLA_SUCCESS)
1807                 goto queuing_error;
1808
1809         cmd_pkt->entry_count = (uint8_t)req_cnt;
1810         /* Specify response queue number where completion should happen */
1811         cmd_pkt->entry_status = (uint8_t) rsp->id;
1812         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1813         wmb();
1814
1815         /* Adjust ring index. */
1816         req->ring_index++;
1817         if (req->ring_index == req->length) {
1818                 req->ring_index = 0;
1819                 req->ring_ptr = req->ring;
1820         } else
1821                 req->ring_ptr++;
1822
1823         /* Set chip new ring index. */
1824         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1825         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1826
1827         /* Manage unprocessed RIO/ZIO commands in response queue. */
1828         if (vha->flags.process_response_queue &&
1829             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1830                 qla24xx_process_response_queue(vha, rsp);
1831
1832         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1833
1834         return QLA_SUCCESS;
1835
1836 queuing_error:
1837         if (status & QDSS_GOT_Q_SPACE) {
1838                 req->outstanding_cmds[handle] = NULL;
1839                 req->cnt += req_cnt;
1840         }
1841         /* Cleanup will be performed by the caller (queuecommand) */
1842
1843         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1844         return QLA_FUNCTION_FAILED;
1845 }
1846
1847
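/**
 * qla25xx_set_que() - Select the response queue for a command.
 * @sp: command being queued
 * @rsp: storage for the selected response-queue pointer
 *
 * With CPU affinity enabled, completions are steered to the response queue
 * associated with the submitting CPU; otherwise the default queue 0 is used.
 */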
1848 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1849 {
1850         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1851         struct qla_hw_data *ha = sp->fcport->vha->hw;
1852         int affinity = cmd->request->cpu;
1853
1854         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1855                 affinity < ha->max_rsp_queues - 1)
1856                 *rsp = ha->rsp_q_map[affinity + 1];
1857         else
1858                 *rsp = ha->rsp_q_map[0];
1859 }
1860
1861 /* Generic Control-SRB manipulation functions. */
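/**
 * qla2x00_alloc_iocbs() - Allocate and initialize an IOCB on the request ring.
 * @vha: HA context
 * @sp: SRB to associate with the IOCB, or NULL to claim ring space only
 *
 * Called with the hardware lock held.  Returns a zeroed request entry with
 * its handle and entry count filled in, or NULL when no handle or ring space
 * is available.
 *
 * Typical usage (a sketch; locking and error handling as in
 * qla2x00_start_sp() below):
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (pkt) {
 *		... fill type-specific fields ...
 *		wmb();
 *		qla2x00_start_iocbs(vha, ha->req_q_map[0]);
 *	}
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */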
1862 void *
1863 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1864 {
1865         struct qla_hw_data *ha = vha->hw;
1866         struct req_que *req = ha->req_q_map[0];
1867         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1868         uint32_t index, handle;
1869         request_t *pkt;
1870         uint16_t cnt, req_cnt;
1871
1872         pkt = NULL;
1873         req_cnt = 1;
1874         handle = 0;
1875
1876         if (!sp)
1877                 goto skip_cmd_array;
1878
1879         /* Check for room in outstanding command list. */
1880         handle = req->current_outstanding_cmd;
1881         for (index = 1; index < req->num_outstanding_cmds; index++) {
1882                 handle++;
1883                 if (handle == req->num_outstanding_cmds)
1884                         handle = 1;
1885                 if (!req->outstanding_cmds[handle])
1886                         break;
1887         }
1888         if (index == req->num_outstanding_cmds) {
1889                 ql_log(ql_log_warn, vha, 0x700b,
1890                     "No room on outstanding cmd array.\n");
1891                 goto queuing_error;
1892         }
1893
1894         /* Prep command array. */
1895         req->current_outstanding_cmd = handle;
1896         req->outstanding_cmds[handle] = sp;
1897         sp->handle = handle;
1898
1899         /* Adjust entry-counts as needed. */
1900         if (sp->type != SRB_SCSI_CMD)
1901                 req_cnt = sp->iocbs;
1902
1903 skip_cmd_array:
1904         /* Check for room on request queue. */
1905         if (req->cnt < req_cnt) {
1906                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1907                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1908                 else if (IS_P3P_TYPE(ha))
1909                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1910                 else if (IS_FWI2_CAPABLE(ha))
1911                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1912                 else if (IS_QLAFX00(ha))
1913                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1914                 else
1915                         cnt = qla2x00_debounce_register(
1916                             ISP_REQ_Q_OUT(ha, &reg->isp));
1917
1918                 if  (req->ring_index < cnt)
1919                         req->cnt = cnt - req->ring_index;
1920                 else
1921                         req->cnt = req->length -
1922                             (req->ring_index - cnt);
1923         }
1924         if (req->cnt < req_cnt)
1925                 goto queuing_error;
1926
1927         /* Prep packet */
1928         req->cnt -= req_cnt;
1929         pkt = req->ring_ptr;
1930         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1931         if (IS_QLAFX00(ha)) {
1932                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1933                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1934         } else {
1935                 pkt->entry_count = req_cnt;
1936                 pkt->handle = handle;
1937         }
1938
1939 queuing_error:
1940         return pkt;
1941 }
1942
1943 static void
1944 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1945 {
1946         struct srb_iocb *lio = &sp->u.iocb_cmd;
1947
1948         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1949         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1950         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1951                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1952         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1953                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1954         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1955         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1956         logio->port_id[1] = sp->fcport->d_id.b.area;
1957         logio->port_id[2] = sp->fcport->d_id.b.domain;
1958         logio->vp_index = sp->fcport->vha->vp_idx;
1959 }
1960
1961 static void
1962 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1963 {
1964         struct qla_hw_data *ha = sp->fcport->vha->hw;
1965         struct srb_iocb *lio = &sp->u.iocb_cmd;
1966         uint16_t opts;
1967
1968         mbx->entry_type = MBX_IOCB_TYPE;
1969         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1970         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1971         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1972         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1973         if (HAS_EXTENDED_IDS(ha)) {
1974                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1975                 mbx->mb10 = cpu_to_le16(opts);
1976         } else {
1977                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1978         }
1979         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1980         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1981             sp->fcport->d_id.b.al_pa);
1982         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1983 }
1984
1985 static void
1986 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1987 {
1988         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1989         logio->control_flags =
1990             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1991         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1992         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1993         logio->port_id[1] = sp->fcport->d_id.b.area;
1994         logio->port_id[2] = sp->fcport->d_id.b.domain;
1995         logio->vp_index = sp->fcport->vha->vp_idx;
1996 }
1997
1998 static void
1999 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2000 {
2001         struct qla_hw_data *ha = sp->fcport->vha->hw;
2002
2003         mbx->entry_type = MBX_IOCB_TYPE;
2004         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2005         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2006         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2007             cpu_to_le16(sp->fcport->loop_id):
2008             cpu_to_le16(sp->fcport->loop_id << 8);
2009         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2010         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2011             sp->fcport->d_id.b.al_pa);
2012         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2013         /* Implicit: mbx->mb10 = 0. */
2014 }
2015
2016 static void
2017 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2018 {
2019         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2020         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2021         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2022         logio->vp_index = sp->fcport->vha->vp_idx;
2023 }
2024
2025 static void
2026 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2027 {
2028         struct qla_hw_data *ha = sp->fcport->vha->hw;
2029
2030         mbx->entry_type = MBX_IOCB_TYPE;
2031         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2032         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2033         if (HAS_EXTENDED_IDS(ha)) {
2034                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2035                 mbx->mb10 = cpu_to_le16(BIT_0);
2036         } else {
2037                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2038         }
2039         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2040         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2041         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2042         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2043         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2044 }
2045
2046 static void
2047 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2048 {
2049         uint32_t flags;
2050         unsigned int lun;
2051         struct fc_port *fcport = sp->fcport;
2052         scsi_qla_host_t *vha = fcport->vha;
2053         struct qla_hw_data *ha = vha->hw;
2054         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2055         struct req_que *req = vha->req;
2056
2057         flags = iocb->u.tmf.flags;
2058         lun = iocb->u.tmf.lun;
2059
2060         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2061         tsk->entry_count = 1;
2062         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2063         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
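        /* Timeout derived from R_A_TOV and doubled; 2 * R_A_TOV is the
         * usual FC recovery window. */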
2064         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2065         tsk->control_flags = cpu_to_le32(flags);
2066         tsk->port_id[0] = fcport->d_id.b.al_pa;
2067         tsk->port_id[1] = fcport->d_id.b.area;
2068         tsk->port_id[2] = fcport->d_id.b.domain;
2069         tsk->vp_index = fcport->vha->vp_idx;
2070
2071         if (flags == TCF_LUN_RESET) {
2072                 int_to_scsilun(lun, &tsk->lun);
2073                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2074                         sizeof(tsk->lun));
2075         }
2076 }
2077
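/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a BSG request.
 * @sp: BSG command SRB
 * @els_iocb: IOCB buffer to fill
 */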
2078 static void
2079 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2080 {
2081         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2082
2083         els_iocb->entry_type = ELS_IOCB_TYPE;
2084         els_iocb->entry_count = 1;
2085         els_iocb->sys_define = 0;
2086         els_iocb->entry_status = 0;
2087         els_iocb->handle = sp->handle;
2088         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2089         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2090         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2091         els_iocb->sof_type = EST_SOFI3;
2092         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2093
2094         els_iocb->opcode =
2095             sp->type == SRB_ELS_CMD_RPT ?
2096             bsg_job->request->rqst_data.r_els.els_code :
2097             bsg_job->request->rqst_data.h_els.command_code;
2098         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2099         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2100         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2101         els_iocb->control_flags = 0;
2102         els_iocb->rx_byte_count =
2103             cpu_to_le32(bsg_job->reply_payload.payload_len);
2104         els_iocb->tx_byte_count =
2105             cpu_to_le32(bsg_job->request_payload.payload_len);
2106
2107         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2108             (bsg_job->request_payload.sg_list)));
2109         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2110             (bsg_job->request_payload.sg_list)));
2111         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2112             (bsg_job->request_payload.sg_list));
2113
2114         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2115             (bsg_job->reply_payload.sg_list)));
2116         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2117             (bsg_job->reply_payload.sg_list)));
2118         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2119             (bsg_job->reply_payload.sg_list));
2120
2121         sp->fcport->vha->qla_stats.control_requests++;
2122 }
2123
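/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB (pre-FWI2 adapters).
 * @sp: BSG command SRB
 * @ct_iocb: IOCB buffer to fill
 */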
2124 static void
2125 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2126 {
2127         uint16_t        avail_dsds;
2128         uint32_t        *cur_dsd;
2129         struct scatterlist *sg;
2130         int index;
2131         uint16_t tot_dsds;
2132         scsi_qla_host_t *vha = sp->fcport->vha;
2133         struct qla_hw_data *ha = vha->hw;
2134         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2135         int loop_iteration = 0;
2136         int cont_iocb_prsnt = 0;
2137         int entry_count = 1;
2138
2139         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2140         ct_iocb->entry_type = CT_IOCB_TYPE;
2141         ct_iocb->entry_status = 0;
2142         ct_iocb->handle1 = sp->handle;
2143         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2144         ct_iocb->status = __constant_cpu_to_le16(0);
2145         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2146         ct_iocb->timeout = 0;
2147         ct_iocb->cmd_dsd_count =
2148             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2149         ct_iocb->total_dsd_count =
2150             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2151         ct_iocb->req_bytecount =
2152             cpu_to_le32(bsg_job->request_payload.payload_len);
2153         ct_iocb->rsp_bytecount =
2154             cpu_to_le32(bsg_job->reply_payload.payload_len);
2155
2156         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2157             (bsg_job->request_payload.sg_list)));
2158         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2159             (bsg_job->request_payload.sg_list)));
2160         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2161
2162         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2163             (bsg_job->reply_payload.sg_list)));
2164         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2165             (bsg_job->reply_payload.sg_list)));
2166         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2167
2168         avail_dsds = 1;
2169         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2170         index = 0;
2171         tot_dsds = bsg_job->reply_payload.sg_cnt;
2172
2173         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2174                 dma_addr_t       sle_dma;
2175                 cont_a64_entry_t *cont_pkt;
2176
2177                 /* Allocate additional continuation packets? */
2178                 if (avail_dsds == 0) {
2179                         /*
2180                          * Five DSDs are available in the Cont.
2181                          * Type 1 IOCB.
2182                          */
2183                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2184                             vha->hw->req_q_map[0]);
2185                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2186                         avail_dsds = 5;
2187                         cont_iocb_prsnt = 1;
2188                         entry_count++;
2189                 }
2190
2191                 sle_dma = sg_dma_address(sg);
2192                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2193                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2194                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2195                 loop_iteration++;
2196                 avail_dsds--;
2197         }
2198         ct_iocb->entry_count = entry_count;
2199
2200         sp->fcport->vha->qla_stats.control_requests++;
2201 }
2202
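/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB (FWI2-capable adapters).
 * @sp: BSG command SRB
 * @ct_iocb: IOCB buffer to fill
 */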
2203 static void
2204 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2205 {
2206         uint16_t        avail_dsds;
2207         uint32_t        *cur_dsd;
2208         struct scatterlist *sg;
2209         int index;
2210         uint16_t tot_dsds;
2211         scsi_qla_host_t *vha = sp->fcport->vha;
2212         struct qla_hw_data *ha = vha->hw;
2213         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2214         int loop_iteration = 0;
2215         int cont_iocb_prsnt = 0;
2216         int entry_count = 1;
2217
2218         ct_iocb->entry_type = CT_IOCB_TYPE;
2219         ct_iocb->entry_status = 0;
2220         ct_iocb->sys_define = 0;
2221         ct_iocb->handle = sp->handle;
2222
2223         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2224         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2225         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2226
2227         ct_iocb->cmd_dsd_count =
2228             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2229         ct_iocb->timeout = 0;
2230         ct_iocb->rsp_dsd_count =
2231             cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2232         ct_iocb->rsp_byte_count =
2233             cpu_to_le32(bsg_job->reply_payload.payload_len);
2234         ct_iocb->cmd_byte_count =
2235             cpu_to_le32(bsg_job->request_payload.payload_len);
2236         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2237             (bsg_job->request_payload.sg_list)));
2238         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2239            (bsg_job->request_payload.sg_list)));
2240         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2241             (bsg_job->request_payload.sg_list));
2242
2243         avail_dsds = 1;
2244         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2245         index = 0;
2246         tot_dsds = bsg_job->reply_payload.sg_cnt;
2247
2248         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2249                 dma_addr_t       sle_dma;
2250                 cont_a64_entry_t *cont_pkt;
2251
2252                 /* Allocate additional continuation packets? */
2253                 if (avail_dsds == 0) {
2254                         /*
2255                          * Five DSDs are available in the Cont.
2256                          * Type 1 IOCB.
2257                          */
2258                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2259                             ha->req_q_map[0]);
2260                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2261                         avail_dsds = 5;
2262                         cont_iocb_prsnt = 1;
2263                         entry_count++;
2264                 }
2265
2266                 sle_dma = sg_dma_address(sg);
2267                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2268                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2269                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2270                 loop_iteration++;
2271                 avail_dsds--;
2272         }
2273         ct_iocb->entry_count = entry_count;
2274 }
2275
2276 /**
2277  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2278  * @sp: command to send to the ISP
2279  *
2280  * Returns non-zero if a failure occurred, else zero.
2281  */
2282 int
2283 qla82xx_start_scsi(srb_t *sp)
2284 {
2285         int             ret, nseg;
2286         unsigned long   flags;
2287         struct scsi_cmnd *cmd;
2288         uint32_t        *clr_ptr;
2289         uint32_t        index;
2290         uint32_t        handle;
2291         uint16_t        cnt;
2292         uint16_t        req_cnt;
2293         uint16_t        tot_dsds;
2294         struct device_reg_82xx __iomem *reg;
2295         uint32_t dbval;
2296         uint32_t *fcp_dl;
2297         uint8_t additional_cdb_len;
2298         struct ct6_dsd *ctx;
2299         struct scsi_qla_host *vha = sp->fcport->vha;
2300         struct qla_hw_data *ha = vha->hw;
2301         struct req_que *req = NULL;
2302         struct rsp_que *rsp = NULL;
2303         char tag[2];
2304
2305         /* Setup device pointers. */
2306         ret = 0;
2307         reg = &ha->iobase->isp82;
2308         cmd = GET_CMD_SP(sp);
2309         req = vha->req;
2310         rsp = ha->rsp_q_map[0];
2311
2312         /* So we know we haven't pci_map'ed anything yet */
2313         tot_dsds = 0;
2314
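        /*
         * 82xx request-queue doorbell value: command 0x04 in the low bits,
         * port number from bit 5; the request-queue id (bit 8) and producer
         * index (bit 16) are OR-ed in just before ringing the doorbell.
         */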
2315         dbval = 0x04 | (ha->portnum << 5);
2316
2317         /* Send marker if required */
2318         if (vha->marker_needed != 0) {
2319                 if (qla2x00_marker(vha, req,
2320                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2321                         ql_log(ql_log_warn, vha, 0x300c,
2322                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2323                         return QLA_FUNCTION_FAILED;
2324                 }
2325                 vha->marker_needed = 0;
2326         }
2327
2328         /* Acquire ring specific lock */
2329         spin_lock_irqsave(&ha->hardware_lock, flags);
2330
2331         /* Check for room in outstanding command list. */
2332         handle = req->current_outstanding_cmd;
2333         for (index = 1; index < req->num_outstanding_cmds; index++) {
2334                 handle++;
2335                 if (handle == req->num_outstanding_cmds)
2336                         handle = 1;
2337                 if (!req->outstanding_cmds[handle])
2338                         break;
2339         }
2340         if (index == req->num_outstanding_cmds)
2341                 goto queuing_error;
2342
2343         /* Map the sg table so we have an accurate count of sg entries needed */
2344         if (scsi_sg_count(cmd)) {
2345                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2346                     scsi_sg_count(cmd), cmd->sc_data_direction);
2347                 if (unlikely(!nseg))
2348                         goto queuing_error;
2349         } else
2350                 nseg = 0;
2351
2352         tot_dsds = nseg;
2353
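        /*
         * Above the ql2xshiftctondsd threshold use Command Type 6, which
         * references externally allocated DSD lists and a separate FCP_CMND
         * buffer; otherwise use Command Type 7 with inline/continuation DSDs.
         */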
2354         if (tot_dsds > ql2xshiftctondsd) {
2355                 struct cmd_type_6 *cmd_pkt;
2356                 uint16_t more_dsd_lists = 0;
2357                 struct dsd_dma *dsd_ptr;
2358                 uint16_t i;
2359
2360                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2361                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2362                         ql_dbg(ql_dbg_io, vha, 0x300d,
2363                             "Num of DSD lists %d is more than %d for cmd=%p.\n",
2364                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2365                             cmd);
2366                         goto queuing_error;
2367                 }
2368
2369                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2370                         goto sufficient_dsds;
2371                 else
2372                         more_dsd_lists -= ha->gbl_dsd_avail;
2373
2374                 for (i = 0; i < more_dsd_lists; i++) {
2375                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2376                         if (!dsd_ptr) {
2377                                 ql_log(ql_log_fatal, vha, 0x300e,
2378                                     "Failed to allocate memory for dsd_dma "
2379                                     "for cmd=%p.\n", cmd);
2380                                 goto queuing_error;
2381                         }
2382
2383                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2384                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2385                         if (!dsd_ptr->dsd_addr) {
2386                                 kfree(dsd_ptr);
2387                                 ql_log(ql_log_fatal, vha, 0x300f,
2388                                     "Failed to allocate memory for dsd_addr "
2389                                     "for cmd=%p.\n", cmd);
2390                                 goto queuing_error;
2391                         }
2392                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2393                         ha->gbl_dsd_avail++;
2394                 }
2395
2396 sufficient_dsds:
2397                 req_cnt = 1;
2398
2399                 if (req->cnt < (req_cnt + 2)) {
2400                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2401                                 &reg->req_q_out[0]);
2402                         if (req->ring_index < cnt)
2403                                 req->cnt = cnt - req->ring_index;
2404                         else
2405                                 req->cnt = req->length -
2406                                         (req->ring_index - cnt);
2407                         if (req->cnt < (req_cnt + 2))
2408                                 goto queuing_error;
2409                 }
2410
2411                 ctx = sp->u.scmd.ctx =
2412                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2413                 if (!ctx) {
2414                         ql_log(ql_log_fatal, vha, 0x3010,
2415                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2416                         goto queuing_error;
2417                 }
2418
2419                 memset(ctx, 0, sizeof(struct ct6_dsd));
2420                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2421                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2422                 if (!ctx->fcp_cmnd) {
2423                         ql_log(ql_log_fatal, vha, 0x3011,
2424                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2425                         goto queuing_error;
2426                 }
2427
2428                 /* Initialize the DSD list and dma handle */
2429                 INIT_LIST_HEAD(&ctx->dsd_list);
2430                 ctx->dsd_use_cnt = 0;
2431
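                /*
                 * FCP_CMND length: 8-byte LUN plus 4 control bytes, then
                 * the CDB, then the 4-byte FCP_DL trailer.
                 */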
2432                 if (cmd->cmd_len > 16) {
2433                         additional_cdb_len = cmd->cmd_len - 16;
2434                         if ((cmd->cmd_len % 4) != 0) {
2435                                 /* SCSI commands longer than 16 bytes must
2436                                  * have a length that is a multiple of 4.
2437                                  */
2438                                 ql_log(ql_log_warn, vha, 0x3012,
2439                                     "scsi cmd len %d not multiple of 4 "
2440                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2441                                 goto queuing_error_fcp_cmnd;
2442                         }
2443                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2444                 } else {
2445                         additional_cdb_len = 0;
2446                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2447                 }
2448
2449                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2450                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2451
2452                 /* Zero out remaining portion of packet. */
2453                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2454                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2455                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2456                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2457
2458                 /* Set NPORT-ID and LUN number*/
2459                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2460                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2461                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2462                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2463                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2464
2465                 /* Build IOCB segments */
2466                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2467                         goto queuing_error_fcp_cmnd;
2468
2469                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2470                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2471
2472                 /* build FCP_CMND IU */
2473                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2474                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2475                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2476
2477                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2478                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2479                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2480                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2481
2482                 /*
2483                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2484                  */
2485                 if (scsi_populate_tag_msg(cmd, tag)) {
2486                         switch (tag[0]) {
2487                         case HEAD_OF_QUEUE_TAG:
2488                                 ctx->fcp_cmnd->task_attribute =
2489                                     TSK_HEAD_OF_QUEUE;
2490                                 break;
2491                         case ORDERED_QUEUE_TAG:
2492                                 ctx->fcp_cmnd->task_attribute =
2493                                     TSK_ORDERED;
2494                                 break;
2495                         }
2496                 }
2497
2498                 /* Populate the FCP_PRIO. */
2499                 if (ha->flags.fcp_prio_enabled)
2500                         ctx->fcp_cmnd->task_attribute |=
2501                             sp->fcport->fcp_prio << 3;
2502
2503                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2504
2505                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2506                     additional_cdb_len);
2507                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2508
2509                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2510                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2511                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2512                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2513                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2514
2515                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2516                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2517                 /* Set total data segment count. */
2518                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2519                 /* Specify response queue number where
2520                  * completion should happen
2521                  */
2522                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2523         } else {
2524                 struct cmd_type_7 *cmd_pkt;
2525                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2526                 if (req->cnt < (req_cnt + 2)) {
2527                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2528                             &reg->req_q_out[0]);
2529                         if (req->ring_index < cnt)
2530                                 req->cnt = cnt - req->ring_index;
2531                         else
2532                                 req->cnt = req->length -
2533                                         (req->ring_index - cnt);
2534                 }
2535                 if (req->cnt < (req_cnt + 2))
2536                         goto queuing_error;
2537
2538                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2539                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2540
2541                 /* Zero out remaining portion of packet. */
2542                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2543                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2544                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2545                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2546
2547                 /* Set NPORT-ID and LUN number*/
2548                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2549                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2550                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2551                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2552                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2553
2554                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2555                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2556                     sizeof(cmd_pkt->lun));
2557
2558                 /*
2559                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2560                  */
2561                 if (scsi_populate_tag_msg(cmd, tag)) {
2562                         switch (tag[0]) {
2563                         case HEAD_OF_QUEUE_TAG:
2564                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2565                                 break;
2566                         case ORDERED_QUEUE_TAG:
2567                                 cmd_pkt->task = TSK_ORDERED;
2568                                 break;
2569                         }
2570                 }
2571
2572                 /* Populate the FCP_PRIO. */
2573                 if (ha->flags.fcp_prio_enabled)
2574                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2575
2576                 /* Load SCSI command packet. */
2577                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2578                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2579
2580                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2581
2582                 /* Build IOCB segments */
2583                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2584
2585                 /* Set total data segment count. */
2586                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2587                 /* Specify response queue number where
2588                  * completion should happen.
2589                  */
2590                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2591
2592         }
2593         /* Build command packet. */
2594         req->current_outstanding_cmd = handle;
2595         req->outstanding_cmds[handle] = sp;
2596         sp->handle = handle;
2597         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2598         req->cnt -= req_cnt;
2599         wmb();
2600
2601         /* Adjust ring index. */
2602         req->ring_index++;
2603         if (req->ring_index == req->length) {
2604                 req->ring_index = 0;
2605                 req->ring_ptr = req->ring;
2606         } else
2607                 req->ring_ptr++;
2608
2609         sp->flags |= SRB_DMA_VALID;
2610
2611         /* Set new chip ring index. */
2612         /* Write, read back and verify, so a dropped doorbell write is retried. */
2613         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2614         if (ql2xdbwr) {
2615                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2616         } else {
2617                 WRT_REG_DWORD(
2618                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2619                         dbval);
2620                 wmb();
2621                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2622                         WRT_REG_DWORD(
2623                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2624                                 dbval);
2625                         wmb();
2626                 }
2627         }
2628
2629         /* Manage unprocessed RIO/ZIO commands in response queue. */
2630         if (vha->flags.process_response_queue &&
2631             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2632                 qla24xx_process_response_queue(vha, rsp);
2633
2634         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2635         return QLA_SUCCESS;
2636
2637 queuing_error_fcp_cmnd:
2638         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2639 queuing_error:
2640         if (tot_dsds)
2641                 scsi_dma_unmap(cmd);
2642
2643         if (sp->u.scmd.ctx) {
2644                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2645                 sp->u.scmd.ctx = NULL;
2646         }
2647         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2648
2649         return QLA_FUNCTION_FAILED;
2650 }
2651
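/**
 * qla24xx_abort_iocb() - Build an abort IOCB for a previously issued command.
 * @sp: abort SRB
 * @abt_iocb: IOCB buffer to fill
 */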
2652 void
2653 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2654 {
2655         struct srb_iocb *aio = &sp->u.iocb_cmd;
2656         scsi_qla_host_t *vha = sp->fcport->vha;
2657         struct req_que *req = vha->req;
2658
2659         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2660         abt_iocb->entry_type = ABORT_IOCB_TYPE;
2661         abt_iocb->entry_count = 1;
2662         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2663         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2664         abt_iocb->handle_to_abort =
2665             cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2666         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2667         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2668         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2669         abt_iocb->vp_index = vha->vp_idx;
2670         abt_iocb->req_que_no = cpu_to_le16(req->id);
2671         /* Ensure all IOCB writes complete before the caller issues it. */
2672         wmb();
2673 }
2674
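/**
 * qla2x00_start_sp() - Allocate an IOCB for an SRB, format it by SRB type
 * and issue it to the ISP.
 * @sp: command to send to the ISP
 *
 * Returns QLA_SUCCESS on success, else QLA_FUNCTION_FAILED.
 */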
2675 int
2676 qla2x00_start_sp(srb_t *sp)
2677 {
2678         int rval;
2679         struct qla_hw_data *ha = sp->fcport->vha->hw;
2680         void *pkt;
2681         unsigned long flags;
2682
2683         rval = QLA_FUNCTION_FAILED;
2684         spin_lock_irqsave(&ha->hardware_lock, flags);
2685         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2686         if (!pkt) {
2687                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2688                     "qla2x00_alloc_iocbs failed.\n");
2689                 goto done;
2690         }
2691
2692         rval = QLA_SUCCESS;
2693         switch (sp->type) {
2694         case SRB_LOGIN_CMD:
2695                 IS_FWI2_CAPABLE(ha) ?
2696                     qla24xx_login_iocb(sp, pkt) :
2697                     qla2x00_login_iocb(sp, pkt);
2698                 break;
2699         case SRB_LOGOUT_CMD:
2700                 IS_FWI2_CAPABLE(ha) ?
2701                     qla24xx_logout_iocb(sp, pkt) :
2702                     qla2x00_logout_iocb(sp, pkt);
2703                 break;
2704         case SRB_ELS_CMD_RPT:
2705         case SRB_ELS_CMD_HST:
2706                 qla24xx_els_iocb(sp, pkt);
2707                 break;
2708         case SRB_CT_CMD:
2709                 IS_FWI2_CAPABLE(ha) ?
2710                     qla24xx_ct_iocb(sp, pkt) :
2711                     qla2x00_ct_iocb(sp, pkt);
2712                 break;
2713         case SRB_ADISC_CMD:
2714                 IS_FWI2_CAPABLE(ha) ?
2715                     qla24xx_adisc_iocb(sp, pkt) :
2716                     qla2x00_adisc_iocb(sp, pkt);
2717                 break;
2718         case SRB_TM_CMD:
2719                 IS_QLAFX00(ha) ?
2720                     qlafx00_tm_iocb(sp, pkt) :
2721                     qla24xx_tm_iocb(sp, pkt);
2722                 break;
2723         case SRB_FXIOCB_DCMD:
2724         case SRB_FXIOCB_BCMD:
2725                 qlafx00_fxdisc_iocb(sp, pkt);
2726                 break;
2727         case SRB_ABT_CMD:
2728                 IS_QLAFX00(ha) ?
2729                         qlafx00_abort_iocb(sp, pkt) :
2730                         qla24xx_abort_iocb(sp, pkt);
2731                 break;
2732         default:
2733                 break;
2734         }
2735
2736         wmb();
2737         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2738 done:
2739         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2740         return rval;
2741 }
2742
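/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: BSG command SRB
 * @vha: HA context
 * @cmd_pkt: command packet to fill
 * @tot_dsds: total number of data segment descriptors
 */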
2743 static void
2744 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2745                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2746 {
2747         uint16_t avail_dsds;
2748         uint32_t *cur_dsd;
2749         uint32_t req_data_len = 0;
2750         uint32_t rsp_data_len = 0;
2751         struct scatterlist *sg;
2752         int index;
2753         int entry_count = 1;
2754         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2755
2756         /* Update entry type to indicate a bidirectional command. */
2757         *((uint32_t *)(&cmd_pkt->entry_type)) =
2758                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2759
2760         /* Set the transfer direction; for a bidirectional command both
2761          * flags are set.  Also set the BD_WRAP_BACK flag; the firmware
2762          * takes care of assigning DID=SID for outgoing packets.
2763          */
2764         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2765         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2766         cmd_pkt->control_flags =
2767                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2768                                                         BD_WRAP_BACK);
2769
2770         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2771         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2772         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2773         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2774
2775         vha->bidi_stats.transfer_bytes += req_data_len;
2776         vha->bidi_stats.io_count++;
2777
2778         vha->qla_stats.output_bytes += req_data_len;
2779         vha->qla_stats.output_requests++;
2780
2781         /* Only one dsd is available for bidirectional IOCB, remaining dsds
2782          * are bundled in continuation iocb
2783          */
2784         avail_dsds = 1;
2785         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2786
2787         index = 0;
2788
2789         for_each_sg(bsg_job->request_payload.sg_list, sg,
2790                                 bsg_job->request_payload.sg_cnt, index) {
2791                 dma_addr_t sle_dma;
2792                 cont_a64_entry_t *cont_pkt;
2793
2794                 /* Allocate additional continuation packets */
2795                 if (avail_dsds == 0) {
2796                         /* A Continuation Type 1 IOCB can accommodate
2797                          * five DSDs.
2798                          */
2799                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2800                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2801                         avail_dsds = 5;
2802                         entry_count++;
2803                 }
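                /* Each DSD is three little-endian dwords: the low and high
                 * halves of the 64-bit DMA address, then the segment length.
                 */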
2804                 sle_dma = sg_dma_address(sg);
2805                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2806                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2807                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2808                 avail_dsds--;
2809         }
2810         /* Read-request DSDs always go to a continuation IOCB and follow
2811          * the write DSDs. If there is room in the current IOCB they are
2812          * added there; otherwise a new continuation IOCB is
2813          * allocated.
2814          */
2815         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2816                                 bsg_job->reply_payload.sg_cnt, index) {
2817                 dma_addr_t sle_dma;
2818                 cont_a64_entry_t *cont_pkt;
2819
2820                 /* Allocate additional continuation packets */
2821                 if (avail_dsds == 0) {
2822                         /* A Continuation Type 1 IOCB can accommodate
2823                          * 5 DSDs.
2824                          */
2825                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2826                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2827                         avail_dsds = 5;
2828                         entry_count++;
2829                 }
2830                 sle_dma = sg_dma_address(sg);
2831                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2832                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2833                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2834                 avail_dsds--;
2835         }
2836         /* This value must match the number of IOCBs used for this command. */
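        /* For example, with tot_dsds = 8 the command entry holds one DSD and
         * each continuation holds up to five, giving 1 + ceil(7 / 5) = 3
         * entries -- the same count qla24xx_calc_iocbs() computes in
         * qla2x00_start_bidir().
         */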
2837         cmd_pkt->entry_count = entry_count;
2838 }
2839
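/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the firmware.
 * @sp: SCSI Request Block
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_MAILBOX if a needed marker
 * IOCB could not be sent, or EXT_STATUS_BUSY if no command handle or
 * request-queue space is available.
 */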
2840 int
2841 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2842 {
2844         struct qla_hw_data *ha = vha->hw;
2845         unsigned long flags;
2846         uint32_t handle;
2847         uint32_t index;
2848         uint16_t req_cnt;
2849         uint16_t cnt;
2850         uint32_t *clr_ptr;
2851         struct cmd_bidir *cmd_pkt = NULL;
2852         struct rsp_que *rsp;
2853         struct req_que *req;
2854         int rval = EXT_STATUS_OK;
2857
2858         rsp = ha->rsp_q_map[0];
2859         req = vha->req;
2860
2861         /* Send marker if required */
2862         if (vha->marker_needed != 0) {
2863                 if (qla2x00_marker(vha, req,
2864                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2865                         return EXT_STATUS_MAILBOX;
2866                 vha->marker_needed = 0;
2867         }
2868
2869         /* Acquire ring specific lock */
2870         spin_lock_irqsave(&ha->hardware_lock, flags);
2871
2872         /* Check for room in outstanding command list. */
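        /* Handle 0 is never used; the search starts just past the last
         * handle issued and wraps around the array.
         */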
2873         handle = req->current_outstanding_cmd;
2874         for (index = 1; index < req->num_outstanding_cmds; index++) {
2875                 handle++;
2876                 if (handle == req->num_outstanding_cmds)
2877                         handle = 1;
2878                 if (!req->outstanding_cmds[handle])
2879                         break;
2880         }
2881
2882         if (index == req->num_outstanding_cmds) {
2883                 rval = EXT_STATUS_BUSY;
2884                 goto queuing_error;
2885         }
2886
2887         /* Calculate the number of IOCBs required. */
2888         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2889
2890         /* Check for room on request queue. */
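        /* req->cnt caches the free-entry count; when it looks too small,
         * re-read the firmware's out pointer (via the shadow register when
         * supported) and recompute the free space in the ring.
         */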
2891         if (req->cnt < req_cnt + 2) {
2892                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2893                     RD_REG_DWORD_RELAXED(req->req_q_out);
2894                 if (req->ring_index < cnt)
2895                         req->cnt = cnt - req->ring_index;
2896                 else
2897                         req->cnt = req->length -
2898                                 (req->ring_index - cnt);
2899         }
2900         if (req->cnt < req_cnt + 2) {
2901                 rval = EXT_STATUS_BUSY;
2902                 goto queuing_error;
2903         }
2904
2905         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2906         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2907
2908         /* Zero out remaining portion of packet. */
2909         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
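        /* Skip the first two dwords, preserving the entry header and the
         * just-assigned handle.
         */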
2910         clr_ptr = (uint32_t *)cmd_pkt + 2;
2911         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2912
2913         /* Set the N_Port ID (of vha). */
2914         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2915         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2916         cmd_pkt->port_id[1] = vha->d_id.b.area;
2917         cmd_pkt->port_id[2] = vha->d_id.b.domain;
2918
2919         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2920         cmd_pkt->entry_status = (uint8_t) rsp->id;
2921         /* Record the command as outstanding. */
2922         req->current_outstanding_cmd = handle;
2923         req->outstanding_cmds[handle] = sp;
2924         sp->handle = handle;
2925         req->cnt -= req_cnt;
2926
2927         /* Send the command to the firmware */
2928         wmb();
2929         qla2x00_start_iocbs(vha, req);
2930 queuing_error:
2931         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2932         return rval;
2933 }