/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
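
/*
 * Worked example (editorial note, not from the original driver): the
 * command IOCB carries the first few DSDs and each continuation IOCB
 * carries the rest. On the 64-bit path the command IOCB holds 2 DSDs and
 * each Continuation Type 1 IOCB holds 5, so a 12-segment transfer needs
 * 1 + (12 - 2) / 5 = 3 IOCBs exactly, while 13 segments leave a remainder
 * of 1 and qla2x00_calc_iocbs_64(13) returns 4. The 32-bit path is the
 * same arithmetic with 3 DSDs in the command IOCB and 7 per Continuation
 * Type 0 IOCB.
 */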

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to take the continuation entry from
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
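
/*
 * Illustrative note (editorial, not from the original driver): both prep
 * helpers above treat the request ring as a circular buffer. With
 * req->length == 128 and req->ring_index == 127, the increment reaches
 * req->length, so the index wraps to 0 and ring_ptr is reset to the base
 * of the ring instead of stepping past its end. The callers in this file
 * take ha->hardware_lock and account for free entries via req->cnt before
 * reaching these helpers, so no emptiness check is repeated here.
 */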

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
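
/*
 * Example (editorial note): for a command with scsi_get_prot_op() ==
 * SCSI_PROT_WRITE_PASS on a host whose DIX guard type includes
 * SHOST_DIX_GUARD_IP, the firmware is asked to verify/convert IP
 * checksums (PO_MODE_DIF_TCP_CKSUM); with a CRC guard the protection
 * data is passed through unchanged (PO_MODE_DIF_PASS). The return value
 * is the number of protection scatter/gather entries, which callers use
 * as tot_prot_dsds when building the CRC_2 IOCB.
 */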

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
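
/*
 * Worked example of the ring-room check above (editorial note): suppose
 * req->length == 128, req->ring_index == 120 and the chip's out pointer
 * reads cnt == 10. The producer has wrapped relative to the consumer, so
 * the free space is req->length - (ring_index - cnt) = 128 - 110 = 18
 * entries. A request needing req_cnt == 3 is admitted only if at least
 * req_cnt + 2 entries are free, the extra two acting as a safety margin
 * between the producer and consumer indices.
 */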

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue whose ring index is advanced
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have the hardware lock held, as specified by the ha_locked
 * parameter. Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}
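
/*
 * Usage sketch (editorial note): callers that already hold
 * ha->hardware_lock pass ha_locked = 1 so the lock-free
 * __qla2x00_marker() variant is used directly; process-context callers
 * pass 0 and let qla2x00_marker() take and drop the lock itself, e.g.:
 *
 *      if (qla2x00_issue_marker(vha, 0) != QLA_SUCCESS)
 *              return QLA_FUNCTION_FAILED;
 */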

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
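
/*
 * Worked example (editorial note): this is a ceiling division, equivalent
 * to DIV_ROUND_UP(dsds, QLA_DSDS_PER_IOCB). Assuming QLA_DSDS_PER_IOCB is
 * 37, as defined in qla_def.h, a 100-segment transfer needs 100 / 37 = 2
 * full DSD lists plus one more for the remaining 26 segments, so
 * qla24xx_calc_dsd_lists(100) == 3.
 */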

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from the SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
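
/*
 * Worked example (editorial note): with blk_sz == 512 and a current SG
 * element of 1536 bytes, three successive calls each carve out one
 * 512-byte protection interval (*partial == 0) before bytes_consumed
 * reaches the element length and cur_sg advances. If an element instead
 * ends mid-interval, the call returns the short slice with *partial == 1
 * and tot_partial carries the shortfall into the next element, so
 * protection intervals may straddle SG boundaries.
 */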

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
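
/*
 * Sizing note (editorial): each DSD written above is three 32-bit words
 * (address low, address high, length), i.e. 12 bytes, which is why
 * dsd_list_len is (avail_dsds + 1) * 12: avail_dsds data entries plus one
 * spare slot that ends up holding either the chain entry pointing at the
 * next list or the null terminator written after the loop.
 */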

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options passed to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;
        fcp_cmnd->task_attribute = TSK_SIMPLE;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
            total_bytes = data_bytes;
            data_bytes += dif_bytes;
            break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
            total_bytes = data_bytes + dif_bytes;
            break;
        default:
            BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */
        /* HBA error checking enabled */
        else if (IS_PI_UNINIT_CAPABLE(ha)) {
                if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
                    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                        SCSI_PROT_DIF_TYPE2))
                        fw_prot_opts |= BIT_10;
                else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
                    SCSI_PROT_DIF_TYPE3)
                        fw_prot_opts |= BIT_11;
        }

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walks data segments */

        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                        cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
                        (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                                tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
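
/*
 * Worked example (editorial note): DIF appends one 8-byte tuple (guard,
 * app tag, ref tag) per protection interval, hence
 * dif_bytes = (data_bytes / blk_size) * 8. For a 64 KiB write with
 * 512-byte sectors that is 128 * 8 = 1024 bytes; under WRITE_PASS the
 * on-wire total_bytes becomes 65536 + 1024, while under WRITE_STRIP
 * total_bytes stays 65536 because the HBA removes the tuples before the
 * data reaches the fabric.
 */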
1397
1398 /**
1399  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1400  * @sp: command to send to the ISP
1401  *
1402  * Returns non-zero if a failure occurred, else zero.
1403  */
1404 int
1405 qla24xx_start_scsi(srb_t *sp)
1406 {
1407         int             nseg;
1408         unsigned long   flags;
1409         uint32_t        *clr_ptr;
1410         uint32_t        index;
1411         uint32_t        handle;
1412         struct cmd_type_7 *cmd_pkt;
1413         uint16_t        cnt;
1414         uint16_t        req_cnt;
1415         uint16_t        tot_dsds;
1416         struct req_que *req = NULL;
1417         struct rsp_que *rsp = NULL;
1418         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1419         struct scsi_qla_host *vha = sp->vha;
1420         struct qla_hw_data *ha = vha->hw;
1421
1422         /* Setup device pointers. */
1423         req = vha->req;
1424         rsp = req->rsp;
1425
1426         /* So we know we haven't pci_map'ed anything yet */
1427         tot_dsds = 0;
1428
1429         /* Send marker if required */
1430         if (vha->marker_needed != 0) {
1431                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1432                     QLA_SUCCESS)
1433                         return QLA_FUNCTION_FAILED;
1434                 vha->marker_needed = 0;
1435         }
1436
1437         /* Acquire ring specific lock */
1438         spin_lock_irqsave(&ha->hardware_lock, flags);
1439
1440         /* Check for room in outstanding command list. */
1441         handle = req->current_outstanding_cmd;
1442         for (index = 1; index < req->num_outstanding_cmds; index++) {
1443                 handle++;
1444                 if (handle == req->num_outstanding_cmds)
1445                         handle = 1;
1446                 if (!req->outstanding_cmds[handle])
1447                         break;
1448         }
1449         if (index == req->num_outstanding_cmds)
1450                 goto queuing_error;
1451
1452         /* Map the sg table so we have an accurate count of sg entries needed */
1453         if (scsi_sg_count(cmd)) {
1454                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1455                     scsi_sg_count(cmd), cmd->sc_data_direction);
1456                 if (unlikely(!nseg))
1457                         goto queuing_error;
1458         } else
1459                 nseg = 0;
1460
1461         tot_dsds = nseg;
1462         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1463         if (req->cnt < (req_cnt + 2)) {
1464                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1465                     RD_REG_DWORD_RELAXED(req->req_q_out);
1466                 if (req->ring_index < cnt)
1467                         req->cnt = cnt - req->ring_index;
1468                 else
1469                         req->cnt = req->length -
1470                                 (req->ring_index - cnt);
1471                 if (req->cnt < (req_cnt + 2))
1472                         goto queuing_error;
1473         }
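        /*
         * Free-space arithmetic, e.g. with req->length = 2048: if
         * ring_index = 100 and the out pointer reads 40, the producer has
         * wrapped past the consumer, so req->cnt = 2048 - (100 - 40) = 1988;
         * if ring_index = 40 and out = 100, req->cnt = 100 - 40 = 60.  The
         * "+ 2" cushion keeps the ring from filling completely, which would
         * make a full ring look identical to an empty one.
         */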
1474
1475         /* Build command packet. */
1476         req->current_outstanding_cmd = handle;
1477         req->outstanding_cmds[handle] = sp;
1478         sp->handle = handle;
1479         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1480         req->cnt -= req_cnt;
1481
1482         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1483         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1484
1485         /* Zero out remaining portion of packet. */
1486         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1487         clr_ptr = (uint32_t *)cmd_pkt + 2;
1488         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
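        /*
         * The "+ 2" skips the first two dwords, i.e. the 4-byte entry
         * header and the 4-byte handle written just above; only the
         * payload portion of the request entry is cleared.
         */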
1489         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1490
1491         /* Set NPORT-ID and LUN number*/
1492         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1493         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1494         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1495         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
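        /*
         * port_id[] stores the 24-bit FC D_ID least-significant byte
         * first (AL_PA, area, domain), matching the firmware's
         * little-endian layout.
         */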
1496         cmd_pkt->vp_index = sp->vha->vp_idx;
1497
1498         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1499         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1500
1501         cmd_pkt->task = TSK_SIMPLE;
1502
1503         /* Load SCSI command packet. */
1504         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1505         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1506
1507         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1508
1509         /* Build IOCB segments */
1510         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1511
1512         /* Set total data segment count. */
1513         cmd_pkt->entry_count = (uint8_t)req_cnt;
1514         wmb();
1515         /* Adjust ring index. */
1516         req->ring_index++;
1517         if (req->ring_index == req->length) {
1518                 req->ring_index = 0;
1519                 req->ring_ptr = req->ring;
1520         } else
1521                 req->ring_ptr++;
1522
1523         sp->flags |= SRB_DMA_VALID;
1524
1525         /* Set chip new ring index. */
1526         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1527         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
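        /*
         * The wmb() above orders the IOCB stores ahead of this doorbell
         * write; the relaxed HCCR read-back flushes the posted PCI write
         * so the chip observes the new in-pointer promptly.
         */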
1528
1529         /* Manage unprocessed RIO/ZIO commands in response queue. */
1530         if (vha->flags.process_response_queue &&
1531                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1532                 qla24xx_process_response_queue(vha, rsp);
1533
1534         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1535         return QLA_SUCCESS;
1536
1537 queuing_error:
1538         if (tot_dsds)
1539                 scsi_dma_unmap(cmd);
1540
1541         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1542
1543         return QLA_FUNCTION_FAILED;
1544 }
1545
1546 /**
1547  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1548  * @sp: command to send to the ISP
1549  *
1550  * Returns non-zero if a failure occurred, else zero.
1551  */
1552 int
1553 qla24xx_dif_start_scsi(srb_t *sp)
1554 {
1555         int                     nseg;
1556         unsigned long           flags;
1557         uint32_t                *clr_ptr;
1558         uint32_t                index;
1559         uint32_t                handle;
1560         uint16_t                cnt;
1561         uint16_t                req_cnt = 0;
1562         uint16_t                tot_dsds;
1563         uint16_t                tot_prot_dsds;
1564         uint16_t                fw_prot_opts = 0;
1565         struct req_que          *req = NULL;
1566         struct rsp_que          *rsp = NULL;
1567         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1568         struct scsi_qla_host    *vha = sp->vha;
1569         struct qla_hw_data      *ha = vha->hw;
1570         struct cmd_type_crc_2   *cmd_pkt;
1571         uint32_t                status = 0;
1572
1573 #define QDSS_GOT_Q_SPACE        BIT_0
1574
1575         /* Only process protection commands or CDBs longer than 16 bytes here. */
1576         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1577                 if (cmd->cmd_len <= 16)
1578                         return qla24xx_start_scsi(sp);
1579         }
1580
1581         /* Setup device pointers. */
1582         req = vha->req;
1583         rsp = req->rsp;
1584
1585         /* So we know we haven't pci_map'ed anything yet */
1586         tot_dsds = 0;
1587
1588         /* Send marker if required */
1589         if (vha->marker_needed != 0) {
1590                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1591                     QLA_SUCCESS)
1592                         return QLA_FUNCTION_FAILED;
1593                 vha->marker_needed = 0;
1594         }
1595
1596         /* Acquire ring specific lock */
1597         spin_lock_irqsave(&ha->hardware_lock, flags);
1598
1599         /* Check for room in outstanding command list. */
1600         handle = req->current_outstanding_cmd;
1601         for (index = 1; index < req->num_outstanding_cmds; index++) {
1602                 handle++;
1603                 if (handle == req->num_outstanding_cmds)
1604                         handle = 1;
1605                 if (!req->outstanding_cmds[handle])
1606                         break;
1607         }
1608
1609         if (index == req->num_outstanding_cmds)
1610                 goto queuing_error;
1611
1612         /* Compute number of required data segments */
1613         /* Map the sg table so we have an accurate count of sg entries needed */
1614         if (scsi_sg_count(cmd)) {
1615                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1616                     scsi_sg_count(cmd), cmd->sc_data_direction);
1617                 if (unlikely(!nseg))
1618                         goto queuing_error;
1619                 else
1620                         sp->flags |= SRB_DMA_VALID;
1621
1622                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1623                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1624                         struct qla2_sgx sgx;
1625                         uint32_t        partial;
1626
1627                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1628                         sgx.tot_bytes = scsi_bufflen(cmd);
1629                         sgx.cur_sg = scsi_sglist(cmd);
1630                         sgx.sp = sp;
1631
1632                         nseg = 0;
1633                         while (qla24xx_get_one_block_sg(
1634                             cmd->device->sector_size, &sgx, &partial))
1635                                 nseg++;
1636                 }
1637         } else
1638                 nseg = 0;
1639
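        /*
         * For READ_INSERT/WRITE_STRIP, protection data is handled per
         * logical block, so the walk above re-counts nseg in sector_size
         * chunks: e.g. a single 4 KB scatter element on a 512-byte-sector
         * device becomes 8 data segments, one per block.
         */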
1640         /* number of required data segments */
1641         tot_dsds = nseg;
1642
1643         /* Compute number of required protection segments */
1644         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1645                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1646                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1647                 if (unlikely(!nseg))
1648                         goto queuing_error;
1649                 else
1650                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1651
1652                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1653                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1654                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1655                 }
1656         } else {
1657                 nseg = 0;
1658         }
1659
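        /*
         * In the INSERT/STRIP cases each logical block contributes exactly
         * one protection segment, so the mapped count is replaced by
         * scsi_bufflen / sector_size (the 4 KB example above again gives 8).
         */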
1660         req_cnt = 1;
1661         /* Total Data and protection sg segment(s) */
1662         tot_prot_dsds = nseg;
1663         tot_dsds += nseg;
1664         if (req->cnt < (req_cnt + 2)) {
1665                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1666                     RD_REG_DWORD_RELAXED(req->req_q_out);
1667                 if (req->ring_index < cnt)
1668                         req->cnt = cnt - req->ring_index;
1669                 else
1670                         req->cnt = req->length -
1671                                 (req->ring_index - cnt);
1672                 if (req->cnt < (req_cnt + 2))
1673                         goto queuing_error;
1674         }
1675
1676         status |= QDSS_GOT_Q_SPACE;
1677
1678         /* Build header part of command packet (excluding the OPCODE). */
1679         req->current_outstanding_cmd = handle;
1680         req->outstanding_cmds[handle] = sp;
1681         sp->handle = handle;
1682         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1683         req->cnt -= req_cnt;
1684
1685         /* Fill-in common area */
1686         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1687         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1688
1689         clr_ptr = (uint32_t *)cmd_pkt + 2;
1690         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1691
1692         /* Set NPORT-ID and LUN number*/
1693         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1694         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1695         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1696         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1697
1698         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1699         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1700
1701         /* Total Data and protection segment(s) */
1702         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1703
1704         /* Build IOCB segments and adjust for data protection segments */
1705         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1706             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1707                 QLA_SUCCESS)
1708                 goto queuing_error;
1709
1710         cmd_pkt->entry_count = (uint8_t)req_cnt;
1711         /* Specify response queue number where completion should happen */
1712         cmd_pkt->entry_status = (uint8_t) rsp->id;
1713         cmd_pkt->timeout = cpu_to_le16(0);
1714         wmb();
1715
1716         /* Adjust ring index. */
1717         req->ring_index++;
1718         if (req->ring_index == req->length) {
1719                 req->ring_index = 0;
1720                 req->ring_ptr = req->ring;
1721         } else
1722                 req->ring_ptr++;
1723
1724         /* Set chip new ring index. */
1725         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1726         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1727
1728         /* Manage unprocessed RIO/ZIO commands in response queue. */
1729         if (vha->flags.process_response_queue &&
1730             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1731                 qla24xx_process_response_queue(vha, rsp);
1732
1733         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1734
1735         return QLA_SUCCESS;
1736
1737 queuing_error:
1738         if (status & QDSS_GOT_Q_SPACE) {
1739                 req->outstanding_cmds[handle] = NULL;
1740                 req->cnt += req_cnt;
1741         }
1742         /* Cleanup will be performed by the caller (queuecommand) */
1743
1744         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1745         return QLA_FUNCTION_FAILED;
1746 }
1747
1748 /**
1749  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1750  * @sp: command to send to the ISP
1751  *
1752  * Returns non-zero if a failure occurred, else zero.
1753  */
1754 static int
1755 qla2xxx_start_scsi_mq(srb_t *sp)
1756 {
1757         int             nseg;
1758         unsigned long   flags;
1759         uint32_t        *clr_ptr;
1760         uint32_t        index;
1761         uint32_t        handle;
1762         struct cmd_type_7 *cmd_pkt;
1763         uint16_t        cnt;
1764         uint16_t        req_cnt;
1765         uint16_t        tot_dsds;
1766         struct req_que *req = NULL;
1767         struct rsp_que *rsp = NULL;
1768         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1769         struct scsi_qla_host *vha = sp->fcport->vha;
1770         struct qla_hw_data *ha = vha->hw;
1771         struct qla_qpair *qpair = sp->qpair;
1772
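        /*
         * Multiqueue path: the logic mirrors qla24xx_start_scsi() but
         * serializes on the per-queue-pair qp_lock instead of the global
         * hardware_lock, and therefore uses the __qla2x00_marker() variant,
         * which expects the caller to already hold the ring lock.
         */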
1773         /* Acquire qpair specific lock */
1774         spin_lock_irqsave(&qpair->qp_lock, flags);
1775
1776         /* Setup qpair pointers */
1777         rsp = qpair->rsp;
1778         req = qpair->req;
1779
1780         /* So we know we haven't pci_map'ed anything yet */
1781         tot_dsds = 0;
1782
1783         /* Send marker if required */
1784         if (vha->marker_needed != 0) {
1785                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1786                     QLA_SUCCESS) {
1787                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1788                         return QLA_FUNCTION_FAILED;
1789                 }
1790                 vha->marker_needed = 0;
1791         }
1792
1793         /* Check for room in outstanding command list. */
1794         handle = req->current_outstanding_cmd;
1795         for (index = 1; index < req->num_outstanding_cmds; index++) {
1796                 handle++;
1797                 if (handle == req->num_outstanding_cmds)
1798                         handle = 1;
1799                 if (!req->outstanding_cmds[handle])
1800                         break;
1801         }
1802         if (index == req->num_outstanding_cmds)
1803                 goto queuing_error;
1804
1805         /* Map the sg table so we have an accurate count of sg entries needed */
1806         if (scsi_sg_count(cmd)) {
1807                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1808                     scsi_sg_count(cmd), cmd->sc_data_direction);
1809                 if (unlikely(!nseg))
1810                         goto queuing_error;
1811         } else
1812                 nseg = 0;
1813
1814         tot_dsds = nseg;
1815         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1816         if (req->cnt < (req_cnt + 2)) {
1817                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1818                     RD_REG_DWORD_RELAXED(req->req_q_out);
1819                 if (req->ring_index < cnt)
1820                         req->cnt = cnt - req->ring_index;
1821                 else
1822                         req->cnt = req->length -
1823                                 (req->ring_index - cnt);
1824                 if (req->cnt < (req_cnt + 2))
1825                         goto queuing_error;
1826         }
1827
1828         /* Build command packet. */
1829         req->current_outstanding_cmd = handle;
1830         req->outstanding_cmds[handle] = sp;
1831         sp->handle = handle;
1832         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1833         req->cnt -= req_cnt;
1834
1835         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1836         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1837
1838         /* Zero out remaining portion of packet. */
1839         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1840         clr_ptr = (uint32_t *)cmd_pkt + 2;
1841         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1842         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1843
1844         /* Set NPORT-ID and LUN number*/
1845         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1846         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1847         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1848         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1849         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1850
1851         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1852         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1853
1854         cmd_pkt->task = TSK_SIMPLE;
1855
1856         /* Load SCSI command packet. */
1857         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1858         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1859
1860         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1861
1862         /* Build IOCB segments */
1863         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1864
1865         /* Set total data segment count. */
1866         cmd_pkt->entry_count = (uint8_t)req_cnt;
1867         wmb();
1868         /* Adjust ring index. */
1869         req->ring_index++;
1870         if (req->ring_index == req->length) {
1871                 req->ring_index = 0;
1872                 req->ring_ptr = req->ring;
1873         } else
1874                 req->ring_ptr++;
1875
1876         sp->flags |= SRB_DMA_VALID;
1877
1878         /* Set chip new ring index. */
1879         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1880
1881         /* Manage unprocessed RIO/ZIO commands in response queue. */
1882         if (vha->flags.process_response_queue &&
1883                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1884                 qla24xx_process_response_queue(vha, rsp);
1885
1886         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1887         return QLA_SUCCESS;
1888
1889 queuing_error:
1890         if (tot_dsds)
1891                 scsi_dma_unmap(cmd);
1892
1893         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1894
1895         return QLA_FUNCTION_FAILED;
1896 }
1897
1898
1899 /**
1900  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1901  * @sp: command to send to the ISP
1902  *
1903  * Returns non-zero if a failure occurred, else zero.
1904  */
1905 int
1906 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1907 {
1908         int                     nseg;
1909         unsigned long           flags;
1910         uint32_t                *clr_ptr;
1911         uint32_t                index;
1912         uint32_t                handle;
1913         uint16_t                cnt;
1914         uint16_t                req_cnt = 0;
1915         uint16_t                tot_dsds;
1916         uint16_t                tot_prot_dsds;
1917         uint16_t                fw_prot_opts = 0;
1918         struct req_que          *req = NULL;
1919         struct rsp_que          *rsp = NULL;
1920         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1921         struct scsi_qla_host    *vha = sp->fcport->vha;
1922         struct qla_hw_data      *ha = vha->hw;
1923         struct cmd_type_crc_2   *cmd_pkt;
1924         uint32_t                status = 0;
1925         struct qla_qpair        *qpair = sp->qpair;
1926
1927 #define QDSS_GOT_Q_SPACE        BIT_0
1928
1929         /* Check for host side state */
1930         if (!qpair->online) {
1931                 cmd->result = DID_NO_CONNECT << 16;
1932                 return QLA_INTERFACE_ERROR;
1933         }
1934
1935         if (!qpair->difdix_supported &&
1936                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1937                 cmd->result = DID_NO_CONNECT << 16;
1938                 return QLA_INTERFACE_ERROR;
1939         }
1940
1941         /* Only process protection commands or CDBs longer than 16 bytes here. */
1942         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1943                 if (cmd->cmd_len <= 16)
1944                         return qla2xxx_start_scsi_mq(sp);
1945         }
1946
1947         spin_lock_irqsave(&qpair->qp_lock, flags);
1948
1949         /* Setup qpair pointers */
1950         rsp = qpair->rsp;
1951         req = qpair->req;
1952
1953         /* So we know we haven't pci_map'ed anything yet */
1954         tot_dsds = 0;
1955
1956         /* Send marker if required */
1957         if (vha->marker_needed != 0) {
1958                 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1959                     QLA_SUCCESS) {
1960                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1961                         return QLA_FUNCTION_FAILED;
1962                 }
1963                 vha->marker_needed = 0;
1964         }
1965
1966         /* Check for room in outstanding command list. */
1967         handle = req->current_outstanding_cmd;
1968         for (index = 1; index < req->num_outstanding_cmds; index++) {
1969                 handle++;
1970                 if (handle == req->num_outstanding_cmds)
1971                         handle = 1;
1972                 if (!req->outstanding_cmds[handle])
1973                         break;
1974         }
1975
1976         if (index == req->num_outstanding_cmds)
1977                 goto queuing_error;
1978
1979         /* Compute number of required data segments */
1980         /* Map the sg table so we have an accurate count of sg entries needed */
1981         if (scsi_sg_count(cmd)) {
1982                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1983                     scsi_sg_count(cmd), cmd->sc_data_direction);
1984                 if (unlikely(!nseg))
1985                         goto queuing_error;
1986                 else
1987                         sp->flags |= SRB_DMA_VALID;
1988
1989                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1990                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1991                         struct qla2_sgx sgx;
1992                         uint32_t        partial;
1993
1994                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1995                         sgx.tot_bytes = scsi_bufflen(cmd);
1996                         sgx.cur_sg = scsi_sglist(cmd);
1997                         sgx.sp = sp;
1998
1999                         nseg = 0;
2000                         while (qla24xx_get_one_block_sg(
2001                             cmd->device->sector_size, &sgx, &partial))
2002                                 nseg++;
2003                 }
2004         } else
2005                 nseg = 0;
2006
2007         /* number of required data segments */
2008         tot_dsds = nseg;
2009
2010         /* Compute number of required protection segments */
2011         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2012                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2013                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2014                 if (unlikely(!nseg))
2015                         goto queuing_error;
2016                 else
2017                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2018
2019                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2020                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2021                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2022                 }
2023         } else {
2024                 nseg = 0;
2025         }
2026
2027         req_cnt = 1;
2028         /* Total Data and protection sg segment(s) */
2029         tot_prot_dsds = nseg;
2030         tot_dsds += nseg;
2031         if (req->cnt < (req_cnt + 2)) {
2032                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2033                     RD_REG_DWORD_RELAXED(req->req_q_out);
2034                 if (req->ring_index < cnt)
2035                         req->cnt = cnt - req->ring_index;
2036                 else
2037                         req->cnt = req->length -
2038                                 (req->ring_index - cnt);
2039                 if (req->cnt < (req_cnt + 2))
2040                         goto queuing_error;
2041         }
2042
2043         status |= QDSS_GOT_Q_SPACE;
2044
2045         /* Build header part of command packet (excluding the OPCODE). */
2046         req->current_outstanding_cmd = handle;
2047         req->outstanding_cmds[handle] = sp;
2048         sp->handle = handle;
2049         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2050         req->cnt -= req_cnt;
2051
2052         /* Fill-in common area */
2053         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2054         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2055
2056         clr_ptr = (uint32_t *)cmd_pkt + 2;
2057         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2058
2059         /* Set NPORT-ID and LUN number*/
2060         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2061         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2062         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2063         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2064
2065         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2066         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2067
2068         /* Total Data and protection segment(s) */
2069         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2070
2071         /* Build IOCB segments and adjust for data protection segments */
2072         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2073             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2074                 QLA_SUCCESS)
2075                 goto queuing_error;
2076
2077         cmd_pkt->entry_count = (uint8_t)req_cnt;
2078         cmd_pkt->timeout = cpu_to_le16(0);
2079         wmb();
2080
2081         /* Adjust ring index. */
2082         req->ring_index++;
2083         if (req->ring_index == req->length) {
2084                 req->ring_index = 0;
2085                 req->ring_ptr = req->ring;
2086         } else
2087                 req->ring_ptr++;
2088
2089         /* Set chip new ring index. */
2090         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2091
2092         /* Manage unprocessed RIO/ZIO commands in response queue. */
2093         if (vha->flags.process_response_queue &&
2094             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2095                 qla24xx_process_response_queue(vha, rsp);
2096
2097         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2098
2099         return QLA_SUCCESS;
2100
2101 queuing_error:
2102         if (status & QDSS_GOT_Q_SPACE) {
2103                 req->outstanding_cmds[handle] = NULL;
2104                 req->cnt += req_cnt;
2105         }
2106         /* Cleanup will be performed by the caller (queuecommand) */
2107
2108         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2109         return QLA_FUNCTION_FAILED;
2110 }
2111
2112 /* Generic Control-SRB manipulation functions. */
2113
2114 /* Caller is assumed to hold the queue-pair lock (hardware_lock on the legacy single-queue path). */
2115
2116 void *
2117 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2118 {
2119         scsi_qla_host_t *vha = qpair->vha;
2120         struct qla_hw_data *ha = vha->hw;
2121         struct req_que *req = qpair->req;
2122         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2123         uint32_t index, handle;
2124         request_t *pkt;
2125         uint16_t cnt, req_cnt;
2126
2127         pkt = NULL;
2128         req_cnt = 1;
2129         handle = 0;
2130
2131         if (!sp)
2132                 goto skip_cmd_array;
2133
2134         /* Check for room in outstanding command list. */
2135         handle = req->current_outstanding_cmd;
2136         for (index = 1; index < req->num_outstanding_cmds; index++) {
2137                 handle++;
2138                 if (handle == req->num_outstanding_cmds)
2139                         handle = 1;
2140                 if (!req->outstanding_cmds[handle])
2141                         break;
2142         }
2143         if (index == req->num_outstanding_cmds) {
2144                 ql_log(ql_log_warn, vha, 0x700b,
2145                     "No room on outstanding cmd array.\n");
2146                 goto queuing_error;
2147         }
2148
2149         /* Prep command array. */
2150         req->current_outstanding_cmd = handle;
2151         req->outstanding_cmds[handle] = sp;
2152         sp->handle = handle;
2153
2154         /* Adjust entry-counts as needed. */
2155         if (sp->type != SRB_SCSI_CMD)
2156                 req_cnt = sp->iocbs;
2157
2158 skip_cmd_array:
2159         /* Check for room on request queue. */
2160         if (req->cnt < req_cnt + 2) {
2161                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2162                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2163                 else if (IS_P3P_TYPE(ha))
2164                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2165                 else if (IS_FWI2_CAPABLE(ha))
2166                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2167                 else if (IS_QLAFX00(ha))
2168                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2169                 else
2170                         cnt = qla2x00_debounce_register(
2171                             ISP_REQ_Q_OUT(ha, &reg->isp));
2172
2173                 if  (req->ring_index < cnt)
2174                         req->cnt = cnt - req->ring_index;
2175                 else
2176                         req->cnt = req->length -
2177                             (req->ring_index - cnt);
2178         }
2179         if (req->cnt < req_cnt + 2)
2180                 goto queuing_error;
2181
2182         /* Prep packet */
2183         req->cnt -= req_cnt;
2184         pkt = req->ring_ptr;
2185         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2186         if (IS_QLAFX00(ha)) {
2187                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2188                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2189         } else {
2190                 pkt->entry_count = req_cnt;
2191                 pkt->handle = handle;
2192         }
2193
        return pkt;

2194 queuing_error:
2195         qpair->tgt_counters.num_alloc_iocb_failed++;
2196         return pkt;
2197 }
2198
2199 void *
2200 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2201 {
2202         scsi_qla_host_t *vha = qpair->vha;
2203
2204         if (qla2x00_reset_active(vha))
2205                 return NULL;
2206
2207         return __qla2x00_alloc_iocbs(qpair, sp);
2208 }
2209
2210 void *
2211 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2212 {
2213         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2214 }
2215
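/*
 * Typical control-SRB flow, sketched under the locking rule above
 * (qla2x00_start_iocbs() is the companion routine that advances the
 * in-pointer and rings the doorbell):
 *
 *      pkt = qla2x00_alloc_iocbs(vha, sp);
 *      if (!pkt)
 *              return QLA_FUNCTION_FAILED;
 *      ... fill in the type-specific fields, e.g. qla24xx_login_iocb() ...
 *      wmb();
 *      qla2x00_start_iocbs(vha, vha->req);
 */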
2216 static void
2217 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2218 {
2219         struct srb_iocb *lio = &sp->u.iocb_cmd;
2220
2221         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2222         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2223         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2224                 logio->control_flags |= LCF_NVME_PRLI;
2225
2226         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2227         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2228         logio->port_id[1] = sp->fcport->d_id.b.area;
2229         logio->port_id[2] = sp->fcport->d_id.b.domain;
2230         logio->vp_index = sp->vha->vp_idx;
2231 }
2232
2233 static void
2234 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2235 {
2236         struct srb_iocb *lio = &sp->u.iocb_cmd;
2237
2238         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2239         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2240
2241         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2242                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2243         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2244                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2245         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2246         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2247         logio->port_id[1] = sp->fcport->d_id.b.area;
2248         logio->port_id[2] = sp->fcport->d_id.b.domain;
2249         logio->vp_index = sp->vha->vp_idx;
2250 }
2251
2252 static void
2253 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2254 {
2255         struct qla_hw_data *ha = sp->vha->hw;
2256         struct srb_iocb *lio = &sp->u.iocb_cmd;
2257         uint16_t opts;
2258
2259         mbx->entry_type = MBX_IOCB_TYPE;
2260         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2261         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2262         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2263         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2264         if (HAS_EXTENDED_IDS(ha)) {
2265                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2266                 mbx->mb10 = cpu_to_le16(opts);
2267         } else {
2268                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2269         }
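        /*
         * Without extended IDs the 8-bit loop_id and the option bits share
         * mb1 (loop_id in the high byte); with extended IDs loop_id gets
         * all of mb1 and the options move to mb10.
         */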
2270         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2271         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2272             sp->fcport->d_id.b.al_pa);
2273         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2274 }
2275
2276 static void
2277 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2278 {
2279         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2280         logio->control_flags =
2281             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2282         if (!sp->fcport->se_sess ||
2283             !sp->fcport->keep_nport_handle)
2284                 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2285         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2286         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2287         logio->port_id[1] = sp->fcport->d_id.b.area;
2288         logio->port_id[2] = sp->fcport->d_id.b.domain;
2289         logio->vp_index = sp->vha->vp_idx;
2290 }
2291
2292 static void
2293 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2294 {
2295         struct qla_hw_data *ha = sp->vha->hw;
2296
2297         mbx->entry_type = MBX_IOCB_TYPE;
2298         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2299         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2300         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2301             cpu_to_le16(sp->fcport->loop_id):
2302             cpu_to_le16(sp->fcport->loop_id << 8);
2303         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2304         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2305             sp->fcport->d_id.b.al_pa);
2306         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2307         /* Implicit: mbx->mb10 = 0. */
2308 }
2309
2310 static void
2311 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2312 {
2313         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2314         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2315         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2316         logio->vp_index = sp->vha->vp_idx;
2317 }
2318
2319 static void
2320 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2321 {
2322         struct qla_hw_data *ha = sp->vha->hw;
2323
2324         mbx->entry_type = MBX_IOCB_TYPE;
2325         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2326         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2327         if (HAS_EXTENDED_IDS(ha)) {
2328                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2329                 mbx->mb10 = cpu_to_le16(BIT_0);
2330         } else {
2331                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2332         }
2333         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2334         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2335         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2336         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2337         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2338 }
2339
2340 static void
2341 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2342 {
2343         uint32_t flags;
2344         uint64_t lun;
2345         struct fc_port *fcport = sp->fcport;
2346         scsi_qla_host_t *vha = fcport->vha;
2347         struct qla_hw_data *ha = vha->hw;
2348         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2349         struct req_que *req = vha->req;
2350
2351         flags = iocb->u.tmf.flags;
2352         lun = iocb->u.tmf.lun;
2353
2354         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2355         tsk->entry_count = 1;
2356         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2357         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2358         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2359         tsk->control_flags = cpu_to_le32(flags);
2360         tsk->port_id[0] = fcport->d_id.b.al_pa;
2361         tsk->port_id[1] = fcport->d_id.b.area;
2362         tsk->port_id[2] = fcport->d_id.b.domain;
2363         tsk->vp_index = fcport->vha->vp_idx;
2364
2365         if (flags == TCF_LUN_RESET) {
2366                 int_to_scsilun(lun, &tsk->lun);
2367                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2368                         sizeof(tsk->lun));
2369         }
2370 }
2371
2372 static void
2373 qla2x00_els_dcmd_sp_free(void *data)
2374 {
2375         srb_t *sp = data;
2376         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2377
2378         kfree(sp->fcport);
2379
2380         if (elsio->u.els_logo.els_logo_pyld)
2381                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2382                     elsio->u.els_logo.els_logo_pyld,
2383                     elsio->u.els_logo.els_logo_pyld_dma);
2384
2385         del_timer(&elsio->timer);
2386         qla2x00_rel_sp(sp);
2387 }
2388
2389 static void
2390 qla2x00_els_dcmd_iocb_timeout(void *data)
2391 {
2392         srb_t *sp = data;
2393         fc_port_t *fcport = sp->fcport;
2394         struct scsi_qla_host *vha = sp->vha;
2395         struct qla_hw_data *ha = vha->hw;
2396         struct srb_iocb *lio = &sp->u.iocb_cmd;
2397         unsigned long flags = 0;
2398
2399         ql_dbg(ql_dbg_io, vha, 0x3069,
2400             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2401             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2402             fcport->d_id.b.al_pa);
2403
2404         /* Abort the exchange */
2405         spin_lock_irqsave(&ha->hardware_lock, flags);
2406         if (ha->isp_ops->abort_command(sp)) {
2407                 ql_dbg(ql_dbg_io, vha, 0x3070,
2408                     "mbx abort_command failed.\n");
2409         } else {
2410                 ql_dbg(ql_dbg_io, vha, 0x3071,
2411                     "mbx abort_command success.\n");
2412         }
2413         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2414
2415         complete(&lio->u.els_logo.comp);
2416 }
2417
2418 static void
2419 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2420 {
2421         srb_t *sp = ptr;
2422         fc_port_t *fcport = sp->fcport;
2423         struct srb_iocb *lio = &sp->u.iocb_cmd;
2424         struct scsi_qla_host *vha = sp->vha;
2425
2426         ql_dbg(ql_dbg_io, vha, 0x3072,
2427             "%s hdl=%x, portid=%02x%02x%02x done\n",
2428             sp->name, sp->handle, fcport->d_id.b.domain,
2429             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2430
2431         complete(&lio->u.els_logo.comp);
2432 }
2433
2434 int
2435 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2436     port_id_t remote_did)
2437 {
2438         srb_t *sp;
2439         fc_port_t *fcport = NULL;
2440         struct srb_iocb *elsio = NULL;
2441         struct qla_hw_data *ha = vha->hw;
2442         struct els_logo_payload logo_pyld;
2443         int rval = QLA_SUCCESS;
2444
2445         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2446         if (!fcport) {
2447                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2448                return -ENOMEM;
2449         }
2450
2451         /* Alloc SRB structure */
2452         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2453         if (!sp) {
2454                 kfree(fcport);
2455                 ql_log(ql_log_info, vha, 0x70e6,
2456                  "SRB allocation failed\n");
2457                 return -ENOMEM;
2458         }
2459
2460         elsio = &sp->u.iocb_cmd;
2461         fcport->loop_id = 0xFFFF;
2462         fcport->d_id.b.domain = remote_did.b.domain;
2463         fcport->d_id.b.area = remote_did.b.area;
2464         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2465
2466         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x\n",
2467             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2468
2469         sp->type = SRB_ELS_DCMD;
2470         sp->name = "ELS_DCMD";
2471         sp->fcport = fcport;
2472         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2473         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2474         sp->done = qla2x00_els_dcmd_sp_done;
2475         sp->free = qla2x00_els_dcmd_sp_free;
2476
2477         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2478                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2479                             GFP_KERNEL);
2480
2481         if (!elsio->u.els_logo.els_logo_pyld) {
2482                 sp->free(sp);
2483                 return QLA_FUNCTION_FAILED;
2484         }
2485
2486         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2487
2488         elsio->u.els_logo.els_cmd = els_opcode;
2489         logo_pyld.opcode = els_opcode;
2490         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2491         logo_pyld.s_id[1] = vha->d_id.b.area;
2492         logo_pyld.s_id[2] = vha->d_id.b.domain;
2493         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2494         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2495
2496         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2497             sizeof(struct els_logo_payload));
2498
2499         rval = qla2x00_start_sp(sp);
2500         if (rval != QLA_SUCCESS) {
2501                 sp->free(sp);
2502                 return QLA_FUNCTION_FAILED;
2503         }
2504
2505         ql_dbg(ql_dbg_io, vha, 0x3074,
2506             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2507             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2508             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2509
2510         wait_for_completion(&elsio->u.els_logo.comp);
2511
2512         sp->free(sp);
2513         return rval;
2514 }
2515
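/*
 * Note that qla24xx_els_dcmd_iocb() above is fully synchronous: it sleeps
 * in wait_for_completion() until either the done callback or the timeout
 * handler (which aborts the exchange) completes els_logo.comp, so it may
 * only be called from process context, consistent with its GFP_KERNEL
 * allocations.
 */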
2516 static void
2517 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2518 {
2519         scsi_qla_host_t *vha = sp->vha;
2520         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2521
2522         els_iocb->entry_type = ELS_IOCB_TYPE;
2523         els_iocb->entry_count = 1;
2524         els_iocb->sys_define = 0;
2525         els_iocb->entry_status = 0;
2526         els_iocb->handle = sp->handle;
2527         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2528         els_iocb->tx_dsd_count = 1;
2529         els_iocb->vp_index = vha->vp_idx;
2530         els_iocb->sof_type = EST_SOFI3;
2531         els_iocb->rx_dsd_count = 0;
2532         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2533
2534         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2535         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2536         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2537         els_iocb->control_flags = 0;
2538
2539         els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2540         els_iocb->tx_address[0] =
2541             cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2542         els_iocb->tx_address[1] =
2543             cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2544         els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2545
2546         els_iocb->rx_byte_count = 0;
2547         els_iocb->rx_address[0] = 0;
2548         els_iocb->rx_address[1] = 0;
2549         els_iocb->rx_len = 0;
2550
2551         sp->vha->qla_stats.control_requests++;
2552 }
2553
2554 static void
2555 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2556 {
2557         struct bsg_job *bsg_job = sp->u.bsg_job;
2558         struct fc_bsg_request *bsg_request = bsg_job->request;
2559
2560         els_iocb->entry_type = ELS_IOCB_TYPE;
2561         els_iocb->entry_count = 1;
2562         els_iocb->sys_define = 0;
2563         els_iocb->entry_status = 0;
2564         els_iocb->handle = sp->handle;
2565         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2566         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2567         els_iocb->vp_index = sp->vha->vp_idx;
2568         els_iocb->sof_type = EST_SOFI3;
2569         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2570
2571         els_iocb->opcode =
2572             sp->type == SRB_ELS_CMD_RPT ?
2573             bsg_request->rqst_data.r_els.els_code :
2574             bsg_request->rqst_data.h_els.command_code;
2575         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2576         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2577         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2578         els_iocb->control_flags = 0;
2579         els_iocb->rx_byte_count =
2580             cpu_to_le32(bsg_job->reply_payload.payload_len);
2581         els_iocb->tx_byte_count =
2582             cpu_to_le32(bsg_job->request_payload.payload_len);
2583
2584         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2585             (bsg_job->request_payload.sg_list)));
2586         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2587             (bsg_job->request_payload.sg_list)));
2588         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2589             (bsg_job->request_payload.sg_list));
2590
2591         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2592             (bsg_job->reply_payload.sg_list)));
2593         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2594             (bsg_job->reply_payload.sg_list)));
2595         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2596             (bsg_job->reply_payload.sg_list));
2597
2598         sp->vha->qla_stats.control_requests++;
2599 }
2600
2601 static void
2602 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2603 {
2604         uint16_t        avail_dsds;
2605         uint32_t        *cur_dsd;
2606         struct scatterlist *sg;
2607         int index;
2608         uint16_t tot_dsds;
2609         scsi_qla_host_t *vha = sp->vha;
2610         struct qla_hw_data *ha = vha->hw;
2611         struct bsg_job *bsg_job = sp->u.bsg_job;
2612         int loop_iteration = 0;
2613         int entry_count = 1;
2614
2615         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2616         ct_iocb->entry_type = CT_IOCB_TYPE;
2617         ct_iocb->entry_status = 0;
2618         ct_iocb->handle1 = sp->handle;
2619         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2620         ct_iocb->status = cpu_to_le16(0);
2621         ct_iocb->control_flags = cpu_to_le16(0);
2622         ct_iocb->timeout = 0;
2623         ct_iocb->cmd_dsd_count =
2624             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2625         ct_iocb->total_dsd_count =
2626             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2627         ct_iocb->req_bytecount =
2628             cpu_to_le32(bsg_job->request_payload.payload_len);
2629         ct_iocb->rsp_bytecount =
2630             cpu_to_le32(bsg_job->reply_payload.payload_len);
2631
2632         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2633             (bsg_job->request_payload.sg_list)));
2634         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2635             (bsg_job->request_payload.sg_list)));
2636         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2637
2638         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2639             (bsg_job->reply_payload.sg_list)));
2640         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2641             (bsg_job->reply_payload.sg_list)));
2642         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2643
2644         avail_dsds = 1;
2645         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2646         index = 0;
2647         tot_dsds = bsg_job->reply_payload.sg_cnt;
2648
2649         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2650                 dma_addr_t       sle_dma;
2651                 cont_a64_entry_t *cont_pkt;
2652
2653                 /* Allocate additional continuation packets? */
2654                 if (avail_dsds == 0) {
2655                         /*
2656                          * Five DSDs are available in the Cont.
2657                          * Type 1 IOCB.
2658                          */
2659                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2660                             vha->hw->req_q_map[0]);
2661                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2662                         avail_dsds = 5;
2663                         entry_count++;
2664                 }
2665
2666                 sle_dma = sg_dma_address(sg);
2667                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2668                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2669                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2670                 loop_iteration++;
2671                 avail_dsds--;
2672         }
2673         ct_iocb->entry_count = entry_count;
2674
2675         sp->vha->qla_stats.control_requests++;
2676 }
2677
2678 static void
2679 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2680 {
2681         uint16_t        avail_dsds;
2682         uint32_t        *cur_dsd;
2683         struct scatterlist *sg;
2684         int index;
2685         uint16_t tot_dsds;
2686         scsi_qla_host_t *vha = sp->vha;
2687         struct qla_hw_data *ha = vha->hw;
2688         struct bsg_job *bsg_job = sp->u.bsg_job;
2689         int loop_iteration = 0;
2690         int entry_count = 1;
2691
2692         ct_iocb->entry_type = CT_IOCB_TYPE;
2693         ct_iocb->entry_status = 0;
2694         ct_iocb->sys_define = 0;
2695         ct_iocb->handle = sp->handle;
2696
2697         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2698         ct_iocb->vp_index = sp->vha->vp_idx;
2699         ct_iocb->comp_status = cpu_to_le16(0);
2700
2701         ct_iocb->cmd_dsd_count =
2702                 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2703         ct_iocb->timeout = 0;
2704         ct_iocb->rsp_dsd_count =
2705                 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2706         ct_iocb->rsp_byte_count =
2707             cpu_to_le32(bsg_job->reply_payload.payload_len);
2708         ct_iocb->cmd_byte_count =
2709             cpu_to_le32(bsg_job->request_payload.payload_len);
2710         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2711             (bsg_job->request_payload.sg_list)));
2712         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2713            (bsg_job->request_payload.sg_list)));
2714         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2715             (bsg_job->request_payload.sg_list));
2716
2717         avail_dsds = 1;
2718         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2719         index = 0;
2720         tot_dsds = bsg_job->reply_payload.sg_cnt;
2721
2722         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2723                 dma_addr_t       sle_dma;
2724                 cont_a64_entry_t *cont_pkt;
2725
2726                 /* Allocate additional continuation packets? */
2727                 if (avail_dsds == 0) {
2728                         /*
2729                          * Five DSDs are available in the Cont.
2730                          * Type 1 IOCB.
2731                          */
2732                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2733                             ha->req_q_map[0]);
2734                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2735                         avail_dsds = 5;
2736                         entry_count++;
2737                 }
2738
2739                 sle_dma = sg_dma_address(sg);
2740                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2741                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2742                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2743                 loop_iteration++;
2744                 avail_dsds--;
2745         }
2746         ct_iocb->entry_count = entry_count;
2747 }
2748
2749 /**
2750  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2751  * @sp: command to send to the ISP
2752  *
2753  * Returns non-zero if a failure occurred, else zero.
2754  */
2755 int
2756 qla82xx_start_scsi(srb_t *sp)
2757 {
2758         int             nseg;
2759         unsigned long   flags;
2760         struct scsi_cmnd *cmd;
2761         uint32_t        *clr_ptr;
2762         uint32_t        index;
2763         uint32_t        handle;
2764         uint16_t        cnt;
2765         uint16_t        req_cnt;
2766         uint16_t        tot_dsds;
2767         struct device_reg_82xx __iomem *reg;
2768         uint32_t dbval;
2769         uint32_t *fcp_dl;
2770         uint8_t additional_cdb_len;
2771         struct ct6_dsd *ctx;
2772         struct scsi_qla_host *vha = sp->vha;
2773         struct qla_hw_data *ha = vha->hw;
2774         struct req_que *req = NULL;
2775         struct rsp_que *rsp = NULL;
2776
2777         /* Setup device pointers. */
2778         reg = &ha->iobase->isp82;
2779         cmd = GET_CMD_SP(sp);
2780         req = vha->req;
2781         rsp = ha->rsp_q_map[0];
2782
2783         /* So we know we haven't pci_map'ed anything yet */
2784         tot_dsds = 0;
2785
2786         dbval = 0x04 | (ha->portnum << 5);
2787
2788         /* Send marker if required */
2789         if (vha->marker_needed != 0) {
2790                 if (qla2x00_marker(vha, req,
2791                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2792                         ql_log(ql_log_warn, vha, 0x300c,
2793                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2794                         return QLA_FUNCTION_FAILED;
2795                 }
2796                 vha->marker_needed = 0;
2797         }
2798
2799         /* Acquire ring specific lock */
2800         spin_lock_irqsave(&ha->hardware_lock, flags);
2801
2802         /* Check for room in outstanding command list. */
2803         handle = req->current_outstanding_cmd;
2804         for (index = 1; index < req->num_outstanding_cmds; index++) {
2805                 handle++;
2806                 if (handle == req->num_outstanding_cmds)
2807                         handle = 1;
2808                 if (!req->outstanding_cmds[handle])
2809                         break;
2810         }
2811         if (index == req->num_outstanding_cmds)
2812                 goto queuing_error;
2813
2814         /* Map the sg table so we have an accurate count of sg entries needed */
2815         if (scsi_sg_count(cmd)) {
2816                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2817                     scsi_sg_count(cmd), cmd->sc_data_direction);
2818                 if (unlikely(!nseg))
2819                         goto queuing_error;
2820         } else
2821                 nseg = 0;
2822
2823         tot_dsds = nseg;
2824
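        /*
         * Above the ql2xshiftctondsd threshold (a module parameter) the
         * command is built as Command Type 6: rather than chaining
         * continuation entries on the ring, the DSDs live in external
         * lists allocated from dl_dma_pool and tracked on ctx->dsd_list,
         * so even a large I/O consumes a single ring slot (req_cnt = 1).
         */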
2825         if (tot_dsds > ql2xshiftctondsd) {
2826                 struct cmd_type_6 *cmd_pkt;
2827                 uint16_t more_dsd_lists = 0;
2828                 struct dsd_dma *dsd_ptr;
2829                 uint16_t i;
2830
2831                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2832                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2833                         ql_dbg(ql_dbg_io, vha, 0x300d,
2834                             "Num of DSD lists %d is more than %d for cmd=%p.\n",
2835                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2836                             cmd);
2837                         goto queuing_error;
2838                 }
2839
2840                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2841                         goto sufficient_dsds;
2842                 else
2843                         more_dsd_lists -= ha->gbl_dsd_avail;
2844
2845                 for (i = 0; i < more_dsd_lists; i++) {
2846                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2847                         if (!dsd_ptr) {
2848                                 ql_log(ql_log_fatal, vha, 0x300e,
2849                                     "Failed to allocate memory for dsd_dma "
2850                                     "for cmd=%p.\n", cmd);
2851                                 goto queuing_error;
2852                         }
2853
2854                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2855                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2856                         if (!dsd_ptr->dsd_addr) {
2857                                 kfree(dsd_ptr);
2858                                 ql_log(ql_log_fatal, vha, 0x300f,
2859                                     "Failed to allocate memory for dsd_addr "
2860                                     "for cmd=%p.\n", cmd);
2861                                 goto queuing_error;
2862                         }
2863                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2864                         ha->gbl_dsd_avail++;
2865                 }
2866
2867 sufficient_dsds:
2868                 req_cnt = 1;
2869
2870                 if (req->cnt < (req_cnt + 2)) {
2871                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2872                                 &reg->req_q_out[0]);
2873                         if (req->ring_index < cnt)
2874                                 req->cnt = cnt - req->ring_index;
2875                         else
2876                                 req->cnt = req->length -
2877                                         (req->ring_index - cnt);
2878                         if (req->cnt < (req_cnt + 2))
2879                                 goto queuing_error;
2880                 }
2881
2882                 ctx = sp->u.scmd.ctx =
2883                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2884                 if (!ctx) {
2885                         ql_log(ql_log_fatal, vha, 0x3010,
2886                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2887                         goto queuing_error;
2888                 }
2889
2890                 memset(ctx, 0, sizeof(struct ct6_dsd));
2891                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2892                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2893                 if (!ctx->fcp_cmnd) {
2894                         ql_log(ql_log_fatal, vha, 0x3011,
2895                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2896                         goto queuing_error;
2897                 }
2898
2899                 /* Initialize the DSD list and dma handle */
2900                 INIT_LIST_HEAD(&ctx->dsd_list);
2901                 ctx->dsd_use_cnt = 0;
2902
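                /*
                 * FCP_CMND IU sizing: an 8-byte LUN plus 4 control bytes
                 * (12 in all) precede the CDB, and a 4-byte FCP_DL field
                 * follows it -- hence the "12 + cdb_len + 4" arithmetic
                 * below. CDBs longer than 16 bytes spill into the
                 * additional-CDB area and must be a multiple of 4 bytes.
                 */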
2903                 if (cmd->cmd_len > 16) {
2904                         additional_cdb_len = cmd->cmd_len - 16;
2905                         if ((cmd->cmd_len % 4) != 0) {
2906                                 /* A SCSI command longer than 16 bytes
2907                                  * must be a multiple of 4 bytes.
2908                                  */
2909                                 ql_log(ql_log_warn, vha, 0x3012,
2910                                     "scsi cmd len %d not multiple of 4 "
2911                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2912                                 goto queuing_error_fcp_cmnd;
2913                         }
2914                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2915                 } else {
2916                         additional_cdb_len = 0;
2917                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2918                 }
2919
2920                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2921                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2922
2923                 /* Zero out remaining portion of packet. */
2924                 /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
2925                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2926                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2927                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2928
2929                 /* Set NPORT-ID and LUN number. */
2930                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2931                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2932                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2933                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2934                 cmd_pkt->vp_index = sp->vha->vp_idx;
2935
2936                 /* Build IOCB segments */
2937                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2938                         goto queuing_error_fcp_cmnd;
2939
2940                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2941                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2942
2943                 /* build FCP_CMND IU */
2944                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2945                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2946                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2947
2948                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2949                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2950                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2951                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2952
2953                 /* Populate the FCP_PRIO. */
2954                 if (ha->flags.fcp_prio_enabled)
2955                         ctx->fcp_cmnd->task_attribute |=
2956                             sp->fcport->fcp_prio << 3;
2957
2958                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2959
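                /*
                 * FCP_DL sits immediately after the (possibly extended)
                 * CDB inside the FCP_CMND IU and is big-endian on the
                 * wire, hence htonl() rather than cpu_to_le32().
                 */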
2960                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2961                     additional_cdb_len);
2962                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2963
2964                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2965                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2966                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2967                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2968                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2969
2970                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2971                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2972                 /* Set total IOCB entry count. */
2973                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2974                 /* Specify response queue number where
2975                  * completion should happen
2976                  */
2977                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2978         } else {
2979                 struct cmd_type_7 *cmd_pkt;
2980                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2981                 if (req->cnt < (req_cnt + 2)) {
2982                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2983                             &reg->req_q_out[0]);
2984                         if (req->ring_index < cnt)
2985                                 req->cnt = cnt - req->ring_index;
2986                         else
2987                                 req->cnt = req->length -
2988                                         (req->ring_index - cnt);
2989                 }
2990                 if (req->cnt < (req_cnt + 2))
2991                         goto queuing_error;
2992
2993                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2994                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2995
2996                 /* Zero out remaining portion of packet. */
2997                 /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
2998                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2999                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3000                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3001
3002                 /* Set NPORT-ID and LUN number. */
3003                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3004                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3005                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3006                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3007                 cmd_pkt->vp_index = sp->vha->vp_idx;
3008
3009                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3010                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3011                     sizeof(cmd_pkt->lun));
3012
3013                 /* Populate the FCP_PRIO. */
3014                 if (ha->flags.fcp_prio_enabled)
3015                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3016
3017                 /* Load SCSI command packet. */
3018                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3019                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3020
3021                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3022
3023                 /* Build IOCB segments */
3024                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3025
3026                 /* Set total IOCB entry count. */
3027                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3028                 /* Specify response queue number where
3029                  * completion should happen.
3030                  */
3031                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3032
3033         }
3034         /* Command packet built; record it as outstanding. */
3035         req->current_outstanding_cmd = handle;
3036         req->outstanding_cmds[handle] = sp;
3037         sp->handle = handle;
3038         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3039         req->cnt -= req_cnt;
3040         wmb();
3041
3042         /* Adjust ring index. */
3043         req->ring_index++;
3044         if (req->ring_index == req->length) {
3045                 req->ring_index = 0;
3046                 req->ring_ptr = req->ring;
3047         } else
3048                 req->ring_ptr++;
3049
3050         sp->flags |= SRB_DMA_VALID;
3051
3052         /* Set new chip ring index via the doorbell: write, then read
3053          * back and verify, rewriting until the hardware latches it. */
3054         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3055         if (ql2xdbwr)
3056                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3057         else {
3058                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3059                 wmb();
3060                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3061                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3062                         wmb();
3063                 }
3064         }
3065
3066         /* Manage unprocessed RIO/ZIO commands in response queue. */
3067         if (vha->flags.process_response_queue &&
3068             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3069                 qla24xx_process_response_queue(vha, rsp);
3070
3071         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3072         return QLA_SUCCESS;
3073
3074 queuing_error_fcp_cmnd:
3075         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3076 queuing_error:
3077         if (tot_dsds)
3078                 scsi_dma_unmap(cmd);
3079
3080         if (sp->u.scmd.ctx) {
3081                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3082                 sp->u.scmd.ctx = NULL;
3083         }
3084         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3085
3086         return QLA_FUNCTION_FAILED;
3087 }
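
/*
 * Illustrative sketch, not driver code: the request-queue room checks
 * above reduce to the usual circular-buffer distance between a producer
 * and a consumer index. Names here are hypothetical; "in" stands for
 * req->ring_index, "out" for the queue-out pointer read from the chip,
 * and "length" for req->length.
 */
static inline uint16_t example_ring_free(uint16_t in, uint16_t out,
	uint16_t length)
{
	if (in < out)
		return out - in;	/* consumer ahead: simple distance */
	return length - (in - out);	/* producer wrapped past consumer */
}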
3088
3089 static void
3090 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3091 {
3092         struct srb_iocb *aio = &sp->u.iocb_cmd;
3093         scsi_qla_host_t *vha = sp->vha;
3094         struct req_que *req = vha->req;
3095
3096         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3097         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3098         abt_iocb->entry_count = 1;
3099         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3100         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3101         abt_iocb->handle_to_abort =
3102             cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3103         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3104         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3105         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3106         abt_iocb->vp_index = vha->vp_idx;
3107         abt_iocb->req_que_no = cpu_to_le16(req->id);
3108         /* Order writes before the IOCB is handed to the firmware. */
3109         wmb();
3110 }
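
/*
 * Illustrative sketch (an assumption, since MAKE_HANDLE() is defined
 * elsewhere): the handles stamped into IOCBs above pack the request
 * queue id together with the per-queue slot index into one 32-bit
 * value, conventionally with the queue id in the upper half:
 */
static inline uint32_t example_pack_handle(uint16_t que_id, uint16_t slot)
{
	return ((uint32_t)que_id << 16) | slot;
}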
3111
3112 static void
3113 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3114 {
3115         int i, sz;
3116
3117         mbx->entry_type = MBX_IOCB_TYPE;
3118         mbx->handle = sp->handle;
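        /* Copy no more mailbox words than either array can hold. */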
3119         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3120
3121         for (i = 0; i < sz; i++)
3122                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3123 }
3124
3125 static void
3126 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3127 {
3128         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3129         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3130         ct_pkt->handle = sp->handle;
3131 }
3132
3133 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3134         struct nack_to_isp *nack)
3135 {
3136         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3137
3138         nack->entry_type = NOTIFY_ACK_TYPE;
3139         nack->entry_count = 1;
3140         nack->ox_id = ntfy->ox_id;
3141
3142         nack->u.isp24.handle = sp->handle;
3143         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3144         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3145                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3146                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3147         }
3148         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3149         nack->u.isp24.status = ntfy->u.isp24.status;
3150         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3151         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3152         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3153         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3154         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3155         nack->u.isp24.srr_flags = 0;
3156         nack->u.isp24.srr_reject_code = 0;
3157         nack->u.isp24.srr_reject_code_expl = 0;
3158         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3159 }
3160
3161 /*
3162  * Build NVME LS request
3163  */
3164 static int
3165 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3166 {
3167         struct srb_iocb *nvme;
3168         int     rval = QLA_SUCCESS;
3169
3170         nvme = &sp->u.iocb_cmd;
3171         cmd_pkt->entry_type = PT_LS4_REQUEST;
3172         cmd_pkt->entry_count = 1;
3173         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3174
3175         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3176         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3177         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3178
3179         cmd_pkt->tx_dseg_count = 1;
3180         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3181         cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3182         cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3183         cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3184
3185         cmd_pkt->rx_dseg_count = 1;
3186         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3187         cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
3188         cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3189         cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3190
3191         return rval;
3192 }
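
/*
 * Illustrative sketch (hedged): the descriptor fill-in above follows the
 * same pattern used throughout this file -- a 64-bit DMA address split
 * into two little-endian 32-bit words via LSD()/MSD(), followed by the
 * segment length. A generic helper for one descriptor would look like:
 */
static inline void example_fill_dsd(uint32_t *dsd, dma_addr_t addr,
	uint32_t len)
{
	*dsd++ = cpu_to_le32(LSD(addr));	/* low 32 address bits */
	*dsd++ = cpu_to_le32(MSD(addr));	/* high 32 address bits */
	*dsd = cpu_to_le32(len);		/* segment byte count */
}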
3193
3194 int
3195 qla2x00_start_sp(srb_t *sp)
3196 {
3197         int rval;
3198         scsi_qla_host_t *vha = sp->vha;
3199         struct qla_hw_data *ha = vha->hw;
3200         void *pkt;
3201         unsigned long flags;
3202
3203         rval = QLA_FUNCTION_FAILED;
3204         spin_lock_irqsave(&ha->hardware_lock, flags);
3205         pkt = qla2x00_alloc_iocbs(vha, sp);
3206         if (!pkt) {
3207                 ql_log(ql_log_warn, vha, 0x700c,
3208                     "qla2x00_alloc_iocbs failed.\n");
3209                 goto done;
3210         }
3211
3212         rval = QLA_SUCCESS;
3213         switch (sp->type) {
3214         case SRB_LOGIN_CMD:
3215                 IS_FWI2_CAPABLE(ha) ?
3216                     qla24xx_login_iocb(sp, pkt) :
3217                     qla2x00_login_iocb(sp, pkt);
3218                 break;
3219         case SRB_PRLI_CMD:
3220                 qla24xx_prli_iocb(sp, pkt);
3221                 break;
3222         case SRB_LOGOUT_CMD:
3223                 IS_FWI2_CAPABLE(ha) ?
3224                     qla24xx_logout_iocb(sp, pkt) :
3225                     qla2x00_logout_iocb(sp, pkt);
3226                 break;
3227         case SRB_ELS_CMD_RPT:
3228         case SRB_ELS_CMD_HST:
3229                 qla24xx_els_iocb(sp, pkt);
3230                 break;
3231         case SRB_CT_CMD:
3232                 IS_FWI2_CAPABLE(ha) ?
3233                     qla24xx_ct_iocb(sp, pkt) :
3234                     qla2x00_ct_iocb(sp, pkt);
3235                 break;
3236         case SRB_ADISC_CMD:
3237                 IS_FWI2_CAPABLE(ha) ?
3238                     qla24xx_adisc_iocb(sp, pkt) :
3239                     qla2x00_adisc_iocb(sp, pkt);
3240                 break;
3241         case SRB_TM_CMD:
3242                 IS_QLAFX00(ha) ?
3243                     qlafx00_tm_iocb(sp, pkt) :
3244                     qla24xx_tm_iocb(sp, pkt);
3245                 break;
3246         case SRB_FXIOCB_DCMD:
3247         case SRB_FXIOCB_BCMD:
3248                 qlafx00_fxdisc_iocb(sp, pkt);
3249                 break;
3250         case SRB_NVME_LS:
3251                 qla_nvme_ls(sp, pkt);
3252                 break;
3253         case SRB_ABT_CMD:
3254                 IS_QLAFX00(ha) ?
3255                         qlafx00_abort_iocb(sp, pkt) :
3256                         qla24xx_abort_iocb(sp, pkt);
3257                 break;
3258         case SRB_ELS_DCMD:
3259                 qla24xx_els_logo_iocb(sp, pkt);
3260                 break;
3261         case SRB_CT_PTHRU_CMD:
3262                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3263                 break;
3264         case SRB_MB_IOCB:
3265                 qla2x00_mb_iocb(sp, pkt);
3266                 break;
3267         case SRB_NACK_PLOGI:
3268         case SRB_NACK_PRLI:
3269         case SRB_NACK_LOGO:
3270                 qla2x00_send_notify_ack_iocb(sp, pkt);
3271                 break;
3272         default:
3273                 break;
3274         }
3275
3276         wmb();
3277         qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3278 done:
3279         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3280         return rval;
3281 }
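
/*
 * Illustrative caller sketch (hedged; the helper and callback names are
 * assumptions, not defined in this file): a typical qla2x00_start_sp()
 * user allocates an srb, tags its type so the switch above selects the
 * right IOCB builder, installs a completion callback, and submits:
 *
 *	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	if (!sp)
 *		return -ENOMEM;
 *	sp->type = SRB_MB_IOCB;
 *	sp->done = my_mbx_done;		(hypothetical completion hook)
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		sp->free(sp);
 */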
3282
3283 static void
3284 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3285                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3286 {
3287         uint16_t avail_dsds;
3288         uint32_t *cur_dsd;
3289         uint32_t req_data_len = 0;
3290         uint32_t rsp_data_len = 0;
3291         struct scatterlist *sg;
3292         int index;
3293         int entry_count = 1;
3294         struct bsg_job *bsg_job = sp->u.bsg_job;
3295
3296         /* Update entry type to indicate a bidirectional command. */
3297         *((uint32_t *)(&cmd_pkt->entry_type)) =
3298                 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3299
3300         /* Set the transfer direction; for a bidirectional command both
3301          * flags are set. Also set BD_WRAP_BACK: the firmware takes care
3302          * of assigning DID=SID for outgoing packets.
3303          */
3304         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3305         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3306         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3307                                                         BD_WRAP_BACK);
3308
3309         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3310         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3311         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3312         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3313
3314         vha->bidi_stats.transfer_bytes += req_data_len;
3315         vha->bidi_stats.io_count++;
3316
3317         vha->qla_stats.output_bytes += req_data_len;
3318         vha->qla_stats.output_requests++;
3319
3320         /* Only one DSD is available in the bidirectional IOCB itself;
3321          * the remaining DSDs are bundled into continuation IOCBs.
3322          */
3323         avail_dsds = 1;
3324         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3325
3326         index = 0;
3327
3328         for_each_sg(bsg_job->request_payload.sg_list, sg,
3329                                 bsg_job->request_payload.sg_cnt, index) {
3330                 dma_addr_t sle_dma;
3331                 cont_a64_entry_t *cont_pkt;
3332
3333                 /* Allocate additional continuation packets */
3334                 if (avail_dsds == 0) {
3335                         /* A Continuation Type 1 IOCB can accommodate
3336                          * 5 DSDs.
3337                          */
3338                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3339                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3340                         avail_dsds = 5;
3341                         entry_count++;
3342                 }
3343                 sle_dma = sg_dma_address(sg);
3344                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3345                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3346                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3347                 avail_dsds--;
3348         }
3349         /* Read-request DSDs always go to a continuation IOCB, following
3350          * the write DSDs. If there is room in the current IOCB they are
3351          * added there; otherwise a new continuation IOCB is allocated.
3352          */
3354         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3355                                 bsg_job->reply_payload.sg_cnt, index) {
3356                 dma_addr_t sle_dma;
3357                 cont_a64_entry_t *cont_pkt;
3358
3359                 /* Allocate additional continuation packets */
3360                 if (avail_dsds == 0) {
3361                         /* A Continuation Type 1 IOCB can accommodate
3362                          * 5 DSDs.
3363                          */
3364                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3365                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3366                         avail_dsds = 5;
3367                         entry_count++;
3368                 }
3369                 sle_dma = sg_dma_address(sg);
3370                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3371                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3372                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3373                 avail_dsds--;
3374         }
3375         /* This value must equal the number of IOCBs used for this command. */
3376         cmd_pkt->entry_count = entry_count;
3377 }
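
/*
 * Worked example (hedged): the bidirectional IOCB itself holds one DSD
 * and each continuation type 1 IOCB holds five, so entry_count works
 * out to 1 + ceil((tot_dsds - 1) / 5). E.g. 12 combined write+read
 * segments need 1 + ceil(11 / 5) = 4 entries on the request ring.
 */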
3378
3379 int
3380 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3381 {
3383         struct qla_hw_data *ha = vha->hw;
3384         unsigned long flags;
3385         uint32_t handle;
3386         uint32_t index;
3387         uint16_t req_cnt;
3388         uint16_t cnt;
3389         uint32_t *clr_ptr;
3390         struct cmd_bidir *cmd_pkt = NULL;
3391         struct rsp_que *rsp;
3392         struct req_que *req;
3393         int rval = EXT_STATUS_OK;
3396
3397         rsp = ha->rsp_q_map[0];
3398         req = vha->req;
3399
3400         /* Send marker if required */
3401         if (vha->marker_needed != 0) {
3402                 if (qla2x00_marker(vha, req,
3403                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3404                         return EXT_STATUS_MAILBOX;
3405                 vha->marker_needed = 0;
3406         }
3407
3408         /* Acquire ring specific lock */
3409         spin_lock_irqsave(&ha->hardware_lock, flags);
3410
3411         /* Check for room in outstanding command list. */
3412         handle = req->current_outstanding_cmd;
3413         for (index = 1; index < req->num_outstanding_cmds; index++) {
3414                 handle++;
3415                 if (handle == req->num_outstanding_cmds)
3416                         handle = 1;
3417                 if (!req->outstanding_cmds[handle])
3418                         break;
3419         }
3420
3421         if (index == req->num_outstanding_cmds) {
3422                 rval = EXT_STATUS_BUSY;
3423                 goto queuing_error;
3424         }
3425
3426         /* Calculate number of IOCB required */
3427         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3428
3429         /* Check for room on request queue. */
3430         if (req->cnt < req_cnt + 2) {
3431                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3432                     RD_REG_DWORD_RELAXED(req->req_q_out);
3433                 if  (req->ring_index < cnt)
3434                         req->cnt = cnt - req->ring_index;
3435                 else
3436                         req->cnt = req->length -
3437                                 (req->ring_index - cnt);
3438         }
3439         if (req->cnt < req_cnt + 2) {
3440                 rval = EXT_STATUS_BUSY;
3441                 goto queuing_error;
3442         }
3443
3444         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3445         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3446
3447         /* Zero out remaining portion of packet. */
3448         /* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
3449         clr_ptr = (uint32_t *)cmd_pkt + 2;
3450         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3451
3452         /* Set NPORT-ID (of the vha). */
3453         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3454         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3455         cmd_pkt->port_id[1] = vha->d_id.b.area;
3456         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3457
3458         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3459         cmd_pkt->entry_status = (uint8_t) rsp->id;
3460         /* Command packet built; record it as outstanding. */
3461         req->current_outstanding_cmd = handle;
3462         req->outstanding_cmds[handle] = sp;
3463         sp->handle = handle;
3464         req->cnt -= req_cnt;
3465
3466         /* Send the command to the firmware */
3467         wmb();
3468         qla2x00_start_iocbs(vha, req);
3469 queuing_error:
3470         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3471         return rval;
3472 }