/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

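/*
 * Encode a SCSI LUN into the FCP LUN field of an FCP CMND.  Only
 * single-level LUN addressing is handled here: the low 16 bits of the
 * LUN are byte-swapped into the most significant word and the least
 * significant word is cleared.
 */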
static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
{
        fcmd->fcpLunLsl = 0;
        fcmd->fcpLunMsl = swab16((uint16_t)lun);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
        struct lpfc_scsi_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys;

        psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
        if (!psb)
                return NULL;
        memset(psb, 0, sizeof (struct lpfc_scsi_buf));
        psb->scsi_hba = phba;

        /*
         * Get memory from the pci pool to map the virt space to pci bus space
         * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
         * struct fcp_rsp and the number of bde's necessary to support the
         * sg_tablesize.
         */
        psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
                                                        &psb->dma_handle);
        if (!psb->data) {
                kfree(psb);
                return NULL;
        }

        /* Initialize virtual ptrs to dma_buf region. */
        memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

        psb->fcp_cmnd = psb->data;
        psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
        psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
                                                        sizeof(struct fcp_rsp);

        /* Initialize local short-hand pointers. */
        bpl = psb->fcp_bpl;
        pdma_phys = psb->dma_handle;

        /*
         * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
         * list bdes.  Initialize the first two and leave the rest for
         * queuecommand.
         */
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
        bpl->tus.f.bdeFlags = BUFF_USE_CMND;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys += sizeof (struct fcp_cmnd);
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
        bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
        bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
        bpl->tus.w = le32_to_cpu(bpl->tus.w);

        /*
         * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
         * initialize it with all known data now.
         */
        pdma_phys += (sizeof (struct fcp_rsp));
        iocb = &psb->cur_iocbq.iocb;
        iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
        iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
        iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
        iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
        iocb->ulpBdeCount = 1;
        iocb->ulpClass = CLASS3;

        return psb;
}

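/*
 * This routine releases any DMA mappings held by a completed command
 * and returns the scsi buffer to the HBA's free list.  Called with the
 * host lock held.
 */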
static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
        struct lpfc_hba *phba = psb->scsi_hba;

        /*
         * There are only two special cases to consider.  (1) the scsi command
         * requested scatter-gather usage or (2) the scsi command allocated
         * a request buffer, but did not request use_sg.  There is a third
         * case, but it does not require resource deallocation.
         */
        if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
                dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
                                psb->seg_cnt, psb->pCmd->sc_data_direction);
        } else {
                if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
                        dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
                                                psb->pCmd->request_bufflen,
                                                psb->pCmd->sc_data_direction);
                }
        }

        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

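/*
 * This routine maps the data buffer(s) of a SCSI command for DMA and
 * builds the corresponding data BDEs in the command's BPL, then updates
 * the IOCB's BDL size.  Returns 0 on success and 1 if the mapping fails
 * or exceeds the configured scatter-gather segment limit.
 */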
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        dma_addr_t physaddr;
        uint32_t i, num_bde = 0;
        int datadir = scsi_cmnd->sc_data_direction;
        int dma_error;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_cmnd->use_sg) {
                /*
                 * The driver stores the segment count returned from dma_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */
                sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
                lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
                                                scsi_cmnd->use_sg, datadir);
                if (lpfc_cmd->seg_cnt == 0)
                        return 1;

                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        printk(KERN_ERR "%s: Too many sg segments from "
                               "dma_map_sg.  Config %d, seg_cnt %d\n",
                               __FUNCTION__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
                        dma_unmap_sg(&phba->pcidev->dev, sgel,
                                     lpfc_cmd->seg_cnt, datadir);
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment count
                 * during probe that limits the number of sg elements in any
                 * single scsi command.  Just run through the seg_cnt and format
                 * the bde's.
                 */
                for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
                        physaddr = sg_dma_address(sgel);
                        bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                        bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                        bpl->tus.f.bdeSize = sg_dma_len(sgel);
                        if (datadir == DMA_TO_DEVICE)
                                bpl->tus.f.bdeFlags = 0;
                        else
                                bpl->tus.f.bdeFlags = BUFF_USE_RCV;
                        bpl->tus.w = le32_to_cpu(bpl->tus.w);
                        bpl++;
                        sgel++;
                        num_bde++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                physaddr = dma_map_single(&phba->pcidev->dev,
                                          scsi_cmnd->request_buffer,
                                          scsi_cmnd->request_bufflen,
                                          datadir);
                dma_error = dma_mapping_error(physaddr);
                if (dma_error) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                "%d:0718 Unable to dma_map_single "
                                "request_buffer: x%x\n",
                                phba->brd_no, dma_error);
                        return 1;
                }

                lpfc_cmd->nonsg_phys = physaddr;
                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
                if (datadir == DMA_TO_DEVICE)
                        bpl->tus.f.bdeFlags = 0;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                num_bde = 1;
                bpl++;
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer
         */
        iocb_cmd->un.fcpi64.bdl.bdeSize +=
                (num_bde * sizeof (struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
        fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
        return 0;
}

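/*
 * This routine translates a failed FCP RSP into a midlayer result.  It
 * validates the response length, copies out any sense data, and converts
 * underrun, overrun, and read check conditions into the appropriate
 * host status.
 */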
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
        struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
        struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
        struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
        uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;

        /*
         * If this is a task management command, there is no
         * scsi packet associated with this lpfc_cmd.  The driver
         * consumes it.
         */
        if (fcpcmd->fcpCntl2) {
                scsi_status = 0;
                goto out;
        }

        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0730 FCP command failed: RSP "
                        "Data: x%x x%x x%x x%x x%x x%x\n",
                        phba->brd_no, resp_info, scsi_status,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
                        be32_to_cpu(fcprsp->rspRspLen),
                        fcprsp->rspInfo3);

        if (resp_info & RSP_LEN_VALID) {
                rsplen = be32_to_cpu(fcprsp->rspRspLen);
                if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
                    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
                        host_status = DID_ERROR;
                        goto out;
                }
        }

        if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
                uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
                if (snslen > SCSI_SENSE_BUFFERSIZE)
                        snslen = SCSI_SENSE_BUFFERSIZE;

                memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
        }

        cmnd->resid = 0;
        if (resp_info & RESID_UNDER) {
                cmnd->resid = be32_to_cpu(fcprsp->rspResId);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0716 FCP Read Underrun, expected %d, "
                                "residual %d Data: x%x x%x x%x\n", phba->brd_no,
                                be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
                                fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

                /*
                 * The cmnd->underflow is the minimum number of bytes that must
                 * be transferred for this command.  Provided a sense condition
                 * is not present, make sure the actual amount transferred is
                 * at least the underflow value or fail.
                 */
                if (!(resp_info & SNS_LEN_VALID) &&
                    (scsi_status == SAM_STAT_GOOD) &&
                    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                        "%d:0717 FCP command x%x residual "
                                        "underrun converted to error "
                                        "Data: x%x x%x x%x\n", phba->brd_no,
                                        cmnd->cmnd[0], cmnd->request_bufflen,
                                        cmnd->resid, cmnd->underflow);

                        host_status = DID_ERROR;
                }
        } else if (resp_info & RESID_OVER) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0720 FCP command x%x residual "
                                "overrun error. Data: x%x x%x\n",
                                phba->brd_no, cmnd->cmnd[0],
                                cmnd->request_bufflen, cmnd->resid);
                host_status = DID_ERROR;

        /*
         * Check SLI validation that the entire transfer was actually done
         * (fcpi_parm should be zero).  Apply this check only to reads.
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                        (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0734 FCP Read Check Error Data: "
                        "x%x x%x x%x x%x\n", phba->brd_no,
                        be32_to_cpu(fcpcmd->fcpDl),
                        be32_to_cpu(fcprsp->rspResId),
                        fcpi_parm, cmnd->cmnd[0]);
                host_status = DID_ERROR;
                cmnd->resid = cmnd->request_bufflen;
        }

 out:
        cmnd->result = ScsiResult(host_status, scsi_status);
}

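/*
 * Completion handler for FCP command IOCBs.  Maps the SLI status onto a
 * SCSI midlayer result, logs failures, frees the scsi buffer under the
 * host lock, and invokes the midlayer's done routine.
 */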
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                        struct lpfc_iocbq *pIocbOut)
{
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;
        struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long iflag;

        lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
        lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

        if (lpfc_cmd->status) {
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                    (lpfc_cmd->result & IOERR_DRVR_MASK))
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                else if (lpfc_cmd->status >= IOSTAT_CNT)
                        lpfc_cmd->status = IOSTAT_DEFAULT;

                lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                                "%d:0729 FCP cmd x%x failed <%d/%d> status: "
                                "x%x result: x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->cmnd[0], cmd->device->id,
                                cmd->device->lun, lpfc_cmd->status,
                                lpfc_cmd->result, pIocbOut->iocb.ulpContext,
                                lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

                switch (lpfc_cmd->status) {
                case IOSTAT_FCP_RSP_ERROR:
                        /* Call FCP RSP handler to determine result */
                        lpfc_handle_fcp_err(lpfc_cmd);
                        break;
                case IOSTAT_NPORT_BSY:
                case IOSTAT_FABRIC_BSY:
                        cmd->result = ScsiResult(DID_BUS_BUSY, 0);
                        break;
                default:
                        cmd->result = ScsiResult(DID_ERROR, 0);
                        break;
                }

                if (pnode) {
                        if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
                                cmd->result = ScsiResult(DID_BUS_BUSY,
                                        SAM_STAT_BUSY);
                } else {
                        cmd->result = ScsiResult(DID_NO_CONNECT, 0);
                }
        } else {
                cmd->result = ScsiResult(DID_OK, 0);
        }

        if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
                uint32_t *lp = (uint32_t *)cmd->sense_buffer;

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0710 Iodone <%d/%d> cmd %p, error x%x "
                                "SNS x%x x%x Data: x%x x%x\n",
                                phba->brd_no, cmd->device->id,
                                cmd->device->lun, cmd, cmd->result,
                                *lp, *(lp + 3), cmd->retries, cmd->resid);
        }

        spin_lock_irqsave(phba->host->host_lock, iflag);
        lpfc_free_scsi_buf(lpfc_cmd);
        cmd->host_scribble = NULL;
        spin_unlock_irqrestore(phba->host->host_lock, iflag);

        cmd->scsi_done(cmd);
}

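/*
 * This routine fills out the FCP CMND payload (LUN, CDB, and task
 * attributes) and the direction-dependent IOCB fields for a regular
 * SCSI command, and hooks up the completion handler.
 */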
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
                        struct lpfc_nodelist *pnode)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
        int datadir = scsi_cmnd->sc_data_direction;

        lpfc_cmd->fcp_rsp->rspSnsLen = 0;

        lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);

        memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

        if (scsi_cmnd->device->tagged_supported) {
                switch (scsi_cmnd->tag) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->fcpCntl1 = ORDERED_Q;
                        break;
                default:
                        fcp_cmnd->fcpCntl1 = SIMPLE_Q;
                        break;
                }
        } else
                fcp_cmnd->fcpCntl1 = 0;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Pick the IOCB command and the
         * direction-dependent FCP fields accordingly.
         */
        if (scsi_cmnd->use_sg) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
                        iocb_cmd->un.fcpi.fcpi_parm = 0;
                        iocb_cmd->ulpPU = 0;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
                        iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
                        iocb_cmd->ulpPU = PARM_READ_CHECK;
                        iocb_cmd->un.fcpi.fcpi_parm =
                                scsi_cmnd->request_bufflen;
                        fcp_cmnd->fcpCntl3 = READ_DATA;
                        phba->fc4InputRequests++;
                }
        } else {
                iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
                iocb_cmd->un.fcpi.fcpi_parm = 0;
                iocb_cmd->ulpPU = 0;
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }

        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
         */
        piocbq->iocb.ulpContext = pnode->nlp_rpi;
        if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
                piocbq->iocb.ulpFCP2Rcvy = 1;

        piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
        piocbq->context1 = lpfc_cmd;
        piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

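/*
 * Build an FCP task management IOCB (LUN reset, abort task set, or
 * target reset) in the given scsi buffer.  Returns 1 on success and 0
 * if the target node is not in the MAPPED state.
 */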
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             uint8_t task_mgmt_cmd)
{
        struct lpfc_sli *psli;
        struct lpfc_iocbq *piocbq;
        IOCB_t *piocb;
        struct fcp_cmnd *fcp_cmnd;
        struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
        struct lpfc_rport_data *rdata = scsi_dev->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;

        if (!ndlp || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
                return 0;
        }

        psli = &phba->sli;
        piocbq = &(lpfc_cmd->cur_iocbq);
        piocb = &piocbq->iocb;

        fcp_cmnd = lpfc_cmd->fcp_cmnd;
        lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
        fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

        piocb->ulpCommand = CMD_FCP_ICMND64_CR;

        piocb->ulpContext = ndlp->nlp_rpi;
        if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
                piocb->ulpFCP2Rcvy = 1;
        }
        piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

        /* ulpTimeout is only one byte */
        if (lpfc_cmd->timeout > 0xff) {
                /*
                 * Do not timeout the command at the firmware level.
                 * The driver will provide the timeout mechanism.
                 */
                piocb->ulpTimeout = 0;
        } else {
                piocb->ulpTimeout = lpfc_cmd->timeout;
        }

        lpfc_cmd->rdata = rdata;

        switch (task_mgmt_cmd) {
        case FCP_LUN_RESET:
                /* Issue LUN Reset to TGT <num> LUN <num> */
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_FCP,
                                "%d:0703 Issue LUN Reset to TGT %d LUN %d "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                scsi_dev->id, scsi_dev->lun,
                                ndlp->nlp_rpi, ndlp->nlp_flag);

                break;
        case FCP_ABORT_TASK_SET:
                /* Issue Abort Task Set to TGT <num> LUN <num> */
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_FCP,
                                "%d:0701 Issue Abort Task Set to TGT %d LUN %d "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                scsi_dev->id, scsi_dev->lun,
                                ndlp->nlp_rpi, ndlp->nlp_flag);

                break;
        case FCP_TARGET_RESET:
                /* Issue Target Reset to TGT <num> */
                lpfc_printf_log(phba,
                                KERN_INFO,
                                LOG_FCP,
                                "%d:0702 Issue Target Reset to TGT %d "
                                "Data: x%x x%x\n",
                                phba->brd_no,
                                scsi_dev->id, ndlp->nlp_rpi,
                                ndlp->nlp_flag);
                break;
        }

        return 1;
}

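/*
 * Issue a polled FCP target reset, then abort any I/Os still queued to
 * the target, since not all targets clean up their outstanding
 * exchanges after a reset.  Returns SUCCESS or FAILED for the midlayer.
 */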
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp = NULL;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        int ret;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
        if (!ret)
                return FAILED;

        lpfc_cmd->scsi_hba = phba;
        iocbq = &lpfc_cmd->cur_iocbq;
        list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
        if (!iocbqrsp)
                return FAILED;
        memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

        iocbq->iocb_flag |= LPFC_IO_POLL;
        ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
                     &phba->sli.ring[phba->sli.fcp_ring],
                     iocbq, SLI_IOCB_HIGH_PRIORITY,
                     iocbqrsp,
                     lpfc_cmd->timeout);
        if (ret != IOCB_SUCCESS) {
                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                ret = FAILED;
        } else {
                ret = SUCCESS;
                lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
                lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
                if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
                        (lpfc_cmd->result & IOERR_DRVR_MASK))
                                lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
        }

        /*
         * All outstanding txcmplq I/Os should have been aborted by the target.
         * Unfortunately, some targets do not abide by this, forcing the driver
         * to double check.
         */
        lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                            lpfc_cmd->pCmd->device->id,
                            lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

        /* Return response IOCB to free list. */
        list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
        return ret;
}

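/*
 * Minimal completion handler used while aborting a command: just return
 * the scsi buffer to the free list under the host lock.
 */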
static void
lpfc_scsi_cmd_iocb_cleanup(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
                           struct lpfc_iocbq *pIocbOut)
{
        unsigned long iflag;
        struct lpfc_scsi_buf *lpfc_cmd =
                (struct lpfc_scsi_buf *) pIocbIn->context1;

        spin_lock_irqsave(phba->host->host_lock, iflag);
        lpfc_free_scsi_buf(lpfc_cmd);
        spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

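/*
 * Completion handler installed on IOCBs that are being aborted.  Cleans
 * up the scsi buffer and clears host_scribble so the error handlers can
 * see that the command is gone.
 */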
static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
                                struct lpfc_iocbq *pIocbIn,
                                struct lpfc_iocbq *pIocbOut)
{
        struct scsi_cmnd *ml_cmd =
                ((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

        lpfc_scsi_cmd_iocb_cleanup(phba, pIocbIn, pIocbOut);
        ml_cmd->host_scribble = NULL;
}

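/*
 * Build the adapter description string reported through the SCSI host
 * template's info entry point.
 */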
const char *
lpfc_info(struct Scsi_Host *host)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
        int len;
        static char lpfcinfobuf[384];

        memset(lpfcinfobuf, 0, 384);
        if (phba && phba->pcidev) {
                strncpy(lpfcinfobuf, phba->ModelDesc, 256);
                len = strlen(lpfcinfobuf);
                snprintf(lpfcinfobuf + len,
                        384-len,
                        " on PCI bus %02x device %02x irq %d",
                        phba->pcidev->bus->number,
                        phba->pcidev->devfn,
                        phba->pcidev->irq);
                len = strlen(lpfcinfobuf);
                if (phba->Port[0]) {
                        snprintf(lpfcinfobuf + len,
                                 384-len,
                                 " port %s",
                                 phba->Port);
                }
        }
        return lpfcinfobuf;
}

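/*
 * queuecommand entry point.  Validates the target's discovery state,
 * takes a preallocated scsi buffer from the free list, maps the data
 * for DMA, and issues the FCP IOCB.  Returns SCSI_MLQUEUE_HOST_BUSY
 * when the command should be retried by the midlayer.
 */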
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
        struct lpfc_hba *phba =
                (struct lpfc_hba *) cmnd->device->host->hostdata[0];
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        int err = 0;

        /*
         * The target pointer is guaranteed not to be NULL because the driver
         * only clears the device->hostdata field in lpfc_slave_destroy.  This
         * approach guarantees no further IO calls on this target.
         */
        if (!ndlp) {
                cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
                goto out_fail_command;
        }

        /*
         * A Fibre Channel target is present and functioning only when the node
         * state is MAPPED.  Any other state is a failure.
         */
        if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
                if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
                    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
                        cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
                        goto out_fail_command;
                }
                /*
                 * The device is most likely recovered and the driver
                 * needs a bit more time to finish.  Ask the midlayer
                 * to retry.
                 */
                goto out_host_busy;
        }

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd == NULL) {
                printk(KERN_WARNING "%s: No buffer available - list empty, "
                       "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
                goto out_host_busy;
        }

        /*
         * Store the midlayer's command structure for the completion phase
         * and complete the command initialization.
         */
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->rdata = rdata;
        lpfc_cmd->timeout = 0;
        cmnd->host_scribble = (unsigned char *)lpfc_cmd;
        cmnd->scsi_done = done;

        err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
        if (err)
                goto out_host_busy_free_buf;

        lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

        err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
                                &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err)
                goto out_host_busy_free_buf;
        return 0;

 out_host_busy_free_buf:
        lpfc_free_scsi_buf(lpfc_cmd);
        cmnd->host_scribble = NULL;
 out_host_busy:
        return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
        done(cmnd);
        return 0;
}

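/*
 * Abort a single command.  If the IOCB is still on the txq it is simply
 * pulled off the queue; if it has already been handed to the firmware,
 * an ABTS (or a close, when the link is down) is issued and the handler
 * polls for the abort to complete.  Called with the host lock held.
 */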
static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct lpfc_hba *phba =
                        (struct lpfc_hba *)cmnd->device->host->hostdata[0];
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb, *next_iocb;
        struct lpfc_iocbq *abtsiocb = NULL;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        IOCB_t *cmd, *icmd;
        unsigned long snum;
        unsigned int id, lun;
        unsigned int loop_count = 0;
        int ret = IOCB_SUCCESS;

        /*
         * If the host_scribble data area is NULL, then the driver has already
         * completed this command, but the midlayer did not see the completion
         * before the eh fired.  Just return SUCCESS.
         */
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        if (!lpfc_cmd)
                return SUCCESS;

        /* save these now since lpfc_cmd can be freed */
        id   = lpfc_cmd->pCmd->device->id;
        lun  = lpfc_cmd->pCmd->device->lun;
        snum = lpfc_cmd->pCmd->serial_number;

        list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
                cmd = &iocb->iocb;
                if (iocb->context1 != lpfc_cmd)
                        continue;

                list_del_init(&iocb->list);
                pring->txq_cnt--;
                if (!iocb->iocb_cmpl) {
                        list_add_tail(&iocb->list, lpfc_iocb_list);
                } else {
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
                }

                goto out;
        }

        list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
        if (abtsiocb == NULL)
                return FAILED;

        memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

        /*
         * The scsi command was not in the txq.  Check the txcmplq and if it is
         * found, send an abort to the FW.
         */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
                if (iocb->context1 != lpfc_cmd)
                        continue;

                iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
                cmd = &iocb->iocb;
                icmd = &abtsiocb->iocb;
                icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
                icmd->un.acxri.abortContextTag = cmd->ulpContext;
                icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

                icmd->ulpLe = 1;
                icmd->ulpClass = cmd->ulpClass;
                if (phba->hba_state >= LPFC_LINK_UP)
                        icmd->ulpCommand = CMD_ABORT_XRI_CN;
                else
                        icmd->ulpCommand = CMD_CLOSE_XRI_CN;

                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
                if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
                                                                IOCB_ERROR) {
                        list_add_tail(&abtsiocb->list, lpfc_iocb_list);
                        ret = IOCB_ERROR;
                        break;
                }

                /* Wait for abort to complete */
                while (cmnd->host_scribble) {
                        spin_unlock_irq(phba->host->host_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(LPFC_ABORT_WAIT*HZ);
                        spin_lock_irq(phba->host->host_lock);
                        if (++loop_count
                            > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
                                break;
                }

                if (cmnd->host_scribble) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "%d:0748 abort handler timed "
                                        "out waiting for abort to "
                                        "complete. Data: "
                                        "x%x x%x x%x x%lx\n",
                                        phba->brd_no, ret, id, lun, snum);
                        cmnd->host_scribble = NULL;
                        iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
                        ret = IOCB_ERROR;
                }

                break;
        }

 out:
        lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
                        "%d:0749 SCSI layer issued abort device "
                        "Data: x%x x%x x%x x%lx\n",
                        phba->brd_no, ret, id, lun, snum);

        return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_abort_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);
        return rc;
}

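/*
 * LUN reset entry point (called with the host lock held).  Waits for
 * the target to return to the MAPPED state if necessary, issues a
 * polled FCP LUN reset, and then flushes any I/Os still outstanding
 * on the LUN.
 */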
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_scsi_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        int ret = FAILED;
        int cnt, loopcnt;

        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or nodev timeout expires.
         */
        while (1) {
                if (!pnode)
                        break;

                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        spin_unlock_irq(phba->host->host_lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ/2);
                        spin_lock_irq(phba->host->host_lock);
                }
                if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
                        break;
        }

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd == NULL)
                goto out;

        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->timeout = 60;
        lpfc_cmd->scsi_hba = phba;

        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
        if (!ret)
                goto out_free_scsi_buf;

        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
        if (iocbqrsp == NULL)
                goto out_free_scsi_buf;

        memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

        iocbq->iocb_flag |= LPFC_IO_POLL;
        iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

        ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
                     &phba->sli.ring[psli->fcp_ring],
                     iocbq, 0, iocbqrsp, 60);
        if (ret == IOCB_SUCCESS)
                ret = SUCCESS;

        lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
        lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
        if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
                if (lpfc_cmd->result & IOERR_DRVR_MASK)
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

        /*
         * All outstanding txcmplq I/Os should have been aborted by the target.
         * Unfortunately, some targets do not abide by this, forcing the driver
         * to double check.
         */
        lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
                            cmnd->device->id, cmnd->device->lun, 0,
                            LPFC_CTX_LUN);

        loopcnt = 0;
        while ((cnt = lpfc_sli_sum_iocb(phba,
                                        &phba->sli.ring[phba->sli.fcp_ring],
                                        cmnd->device->id, cmnd->device->lun,
                                        LPFC_CTX_LUN))) {
                spin_unlock_irq(phba->host->host_lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(LPFC_RESET_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
                        break;
        }

        if (cnt) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                        "%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
                        phba->brd_no, cnt);
        }

        list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

out_free_scsi_buf:
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                        "%d:0713 SCSI layer issued LUN reset (%d, %d) "
                        "Data: x%x x%x x%x\n",
                        phba->brd_no, lpfc_cmd->pCmd->device->id,
                        lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
                        lpfc_cmd->result);
        lpfc_free_scsi_buf(lpfc_cmd);
out:
        return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_reset_lun_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);
        return rc;
}

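/*
 * Bus reset entry point: issue a target reset to every mapped target
 * known to the driver, then flush all I/O outstanding on the host.
 */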
/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host *shost = cmnd->device->host;
        struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = FAILED, i, err_count = 0;
        int cnt, loopcnt;
        unsigned int midlayer_id = 0;
        struct lpfc_scsi_buf * lpfc_cmd = NULL;
        struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

        list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
        if (lpfc_cmd == NULL)
                goto out;

        /* The lpfc_cmd storage is reused.  Set all loop invariants. */
        lpfc_cmd->timeout = 60;
        lpfc_cmd->pCmd = cmnd;
        lpfc_cmd->scsi_hba = phba;

        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        midlayer_id = cmnd->device->id;
        for (i = 0; i < MAX_FCP_TARGET; i++) {
                /* Search the mapped list for this target ID */
                match = 0;
                list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
                        if ((i == ndlp->nlp_sid) && ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                if (!match)
                        continue;

                lpfc_cmd->pCmd->device->id = i;
                lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
                ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
                if (ret != SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                                "%d:0713 Bus Reset on target %d failed\n",
                                phba->brd_no, i);
                        err_count++;
                }
        }

        cmnd->device->id = midlayer_id;
        loopcnt = 0;
        while ((cnt = lpfc_sli_sum_iocb(phba,
                                &phba->sli.ring[phba->sli.fcp_ring],
                                0, 0, LPFC_CTX_HOST))) {
                spin_unlock_irq(phba->host->host_lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(LPFC_RESET_WAIT*HZ);
                spin_lock_irq(phba->host->host_lock);

                if (++loopcnt
                    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
                        break;
        }

        if (cnt) {
                /* flush all outstanding commands on the host */
                i = lpfc_sli_abort_iocb(phba,
                                &phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
                                LPFC_CTX_HOST);

                lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
                   "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
                   phba->brd_no, cnt, i);
        }

        if (!err_count)
                ret = SUCCESS;

        lpfc_free_scsi_buf(lpfc_cmd);
        lpfc_printf_log(phba,
                        KERN_ERR,
                        LOG_FCP,
                        "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
                        phba->brd_no, ret);
out:
        return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
        int rc;

        spin_lock_irq(cmnd->device->host->host_lock);
        rc = __lpfc_reset_bus_handler(cmnd);
        spin_unlock_irq(cmnd->device->host->host_lock);
        return rc;
}

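/*
 * slave_alloc entry point.  Binds the scsi_device to its discovered
 * node and tops up the host's global pool of preallocated scsi buffers,
 * never exceeding the configured HBA queue depth.
 */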
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
        struct lpfc_nodelist *ndlp = NULL;
        int match = 0;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;
        struct list_head *listp;
        struct list_head *node_list[6];

        /*
         * Store the target pointer in the scsi_device hostdata pointer
         * provided the driver has already discovered the target id.
         */

        /* Search the nlp lists other than unmap_list for this target ID */
        node_list[0] = &phba->fc_npr_list;
        node_list[1] = &phba->fc_nlpmap_list;
        node_list[2] = &phba->fc_prli_list;
        node_list[3] = &phba->fc_reglogin_list;
        node_list[4] = &phba->fc_adisc_list;
        node_list[5] = &phba->fc_plogi_list;

        for (i = 0; i < 6 && !match; i++) {
                listp = node_list[i];
                if (list_empty(listp))
                        continue;
                list_for_each_entry(ndlp, listp, nlp_listp) {
                        if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
        }

        if (!match)
                return -ENXIO;

        sdev->hostdata = ndlp->rport->dd_data;

        /*
         * Populate the cmds_per_lun count scsi_bufs into this host's globally
         * available list of scsi buffers.  Don't allocate more than the
         * HBA limit conveyed to the midlayer via the host structure.  Note
         * that this list of scsi bufs exists for the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = LPFC_CMD_PER_LUN;
        if (total >= phba->cfg_hba_queue_depth) {
                printk(KERN_WARNING "%s, At config limitation of "
                       "%d allocated scsi_bufs\n", __FUNCTION__, total);
                return 0;
        } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_get_scsi_buf(phba);
                if (!scsi_buf) {
                        printk(KERN_ERR "%s, failed to allocate "
                               "scsi_buf\n", __FUNCTION__);
                        break;
                }

                spin_lock_irqsave(phba->host->host_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(phba->host->host_lock, flags);
        }
        return 0;
}

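/*
 * slave_configure entry point.  Applies the configured LUN queue depth
 * and tagged queueing mode, and sets the fc transport dev_loss timeout
 * for the device's remote port.
 */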
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

        return 0;
}

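/*
 * slave_destroy entry point.  The midlayer guarantees no further I/O on
 * the device once this returns, so clearing hostdata is safe here.
 */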
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}

struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_reset_lun_handler,
        .eh_bus_reset_handler   = lpfc_reset_bus_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .this_id                = -1,
        .sg_tablesize           = LPFC_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_host_attrs,
        .max_sectors            = 0xFFFF,
};