drivers/scsi/qla2xxx/qla_os.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/moduleparam.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/kthread.h>
13 #include <linux/mutex.h>
14 #include <linux/kobject.h>
15 #include <linux/slab.h>
16
17 #include <scsi/scsi_tcq.h>
18 #include <scsi/scsicam.h>
19 #include <scsi/scsi_transport.h>
20 #include <scsi/scsi_transport_fc.h>
21
22 /*
23  * Driver version
24  */
25 char qla2x00_version_str[40];
26
27 /*
28  * SRB allocation cache
29  */
30 static struct kmem_cache *srb_cachep;
31
32 int ql2xlogintimeout = 20;
33 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
34 MODULE_PARM_DESC(ql2xlogintimeout,
35                 "Login timeout value in seconds.");
36
37 int qlport_down_retry;
38 module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
39 MODULE_PARM_DESC(qlport_down_retry,
40                 "Maximum number of command retries to a port that returns "
41                 "a PORT-DOWN status.");
42
43 int ql2xplogiabsentdevice;
44 module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
45 MODULE_PARM_DESC(ql2xplogiabsentdevice,
46                 "Option to enable PLOGI to devices that are not present after "
47                 "a Fabric scan.  This is needed for several broken switches. "
48                 "Default is 0 - no PLOGI. 1 - perform PLOGI.");
49
50 int ql2xloginretrycount = 0;
51 module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
52 MODULE_PARM_DESC(ql2xloginretrycount,
53                 "Specify an alternate value for the NVRAM login retry count.");
54
55 int ql2xallocfwdump = 1;
56 module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
57 MODULE_PARM_DESC(ql2xallocfwdump,
58                 "Option to enable allocation of memory for a firmware dump "
59                 "during HBA initialization.  Memory allocation requirements "
60                 "vary by ISP type.  Default is 1 - allocate memory.");
61
62 int ql2xextended_error_logging;
63 module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
64 MODULE_PARM_DESC(ql2xextended_error_logging,
65                 "Option to enable extended error logging. "
66                 "Default is 0 - no logging. 1 - log errors.");
67
68 static void qla2x00_free_device(scsi_qla_host_t *);
69
70 int ql2xfdmienable = 1;
71 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
72 MODULE_PARM_DESC(ql2xfdmienable,
73                 "Enables FDMI registrations. "
74                 "Default is 1 - perform FDMI. 0 - no FDMI.");
75
76 #define MAX_Q_DEPTH    32
77 static int ql2xmaxqdepth = MAX_Q_DEPTH;
78 module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(ql2xmaxqdepth,
80                 "Maximum queue depth to report for target devices.");
81
82 int ql2xiidmaenable = 1;
83 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
84 MODULE_PARM_DESC(ql2xiidmaenable,
85                 "Enables iIDMA settings. "
86                 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
87
88 int ql2xmaxqueues = 1;
89 module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
90 MODULE_PARM_DESC(ql2xmaxqueues,
91                 "Enables MQ settings. "
92                 "Default is 1 for single queue. "
93                 "Set it to the number of queues in MQ mode.");
94
95 int ql2xmultique_tag;
96 module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
97 MODULE_PARM_DESC(ql2xmultique_tag,
98                 "Enables CPU affinity settings for the driver. "
99                 "Default is 0 for no affinity of request and response IO. "
100                 "Set it to 1 to turn on the cpu affinity.");
101
102 int ql2xfwloadbin;
103 module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
104 MODULE_PARM_DESC(ql2xfwloadbin,
105                 "Option to specify location from which to load ISP firmware:\n"
106                 " 2 -- load firmware via the request_firmware() (hotplug)\n"
107                 "      interface.\n"
108                 " 1 -- load firmware from flash.\n"
109                 " 0 -- use default semantics.\n");
110
111 int ql2xetsenable;
112 module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
113 MODULE_PARM_DESC(ql2xetsenable,
114                 "Enables firmware ETS burst. "
115                 "Default is 0 - skip ETS enablement.");
116
117 /*
118  * SCSI host template entry points
119  */
120 static int qla2xxx_slave_configure(struct scsi_device * device);
121 static int qla2xxx_slave_alloc(struct scsi_device *);
122 static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
123 static void qla2xxx_scan_start(struct Scsi_Host *);
124 static void qla2xxx_slave_destroy(struct scsi_device *);
125 static int qla2xxx_queuecommand(struct scsi_cmnd *cmd,
126                 void (*fn)(struct scsi_cmnd *));
127 static int qla2xxx_eh_abort(struct scsi_cmnd *);
128 static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
129 static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
130 static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
131 static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
132
133 static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
134 static int qla2x00_change_queue_type(struct scsi_device *, int);
135
136 struct scsi_host_template qla2xxx_driver_template = {
137         .module                 = THIS_MODULE,
138         .name                   = QLA2XXX_DRIVER_NAME,
139         .queuecommand           = qla2xxx_queuecommand,
140
141         .eh_abort_handler       = qla2xxx_eh_abort,
142         .eh_device_reset_handler = qla2xxx_eh_device_reset,
143         .eh_target_reset_handler = qla2xxx_eh_target_reset,
144         .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
145         .eh_host_reset_handler  = qla2xxx_eh_host_reset,
146
147         .slave_configure        = qla2xxx_slave_configure,
148
149         .slave_alloc            = qla2xxx_slave_alloc,
150         .slave_destroy          = qla2xxx_slave_destroy,
151         .scan_finished          = qla2xxx_scan_finished,
152         .scan_start             = qla2xxx_scan_start,
153         .change_queue_depth     = qla2x00_change_queue_depth,
154         .change_queue_type      = qla2x00_change_queue_type,
155         .this_id                = -1,
156         .cmd_per_lun            = 3,
157         .use_clustering         = ENABLE_CLUSTERING,
158         .sg_tablesize           = SG_ALL,
159
160         .max_sectors            = 0xFFFF,
161         .shost_attrs            = qla2x00_host_attrs,
162 };
163
164 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
165 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
166
167 /* TODO Convert to inlines
168  *
169  * Timer routines
170  */
171
172 __inline__ void
173 qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
174 {
175         init_timer(&vha->timer);
176         vha->timer.expires = jiffies + interval * HZ;
177         vha->timer.data = (unsigned long)vha;
178         vha->timer.function = (void (*)(unsigned long))func;
179         add_timer(&vha->timer);
180         vha->timer_active = 1;
181 }
182
183 static inline void
184 qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
185 {
186         mod_timer(&vha->timer, jiffies + interval * HZ);
187 }
188
189 static __inline__ void
190 qla2x00_stop_timer(scsi_qla_host_t *vha)
191 {
192         del_timer_sync(&vha->timer);
193         vha->timer_active = 0;
194 }
195
196 static int qla2x00_do_dpc(void *data);
197
198 static void qla2x00_rst_aen(scsi_qla_host_t *);
199
200 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
201         struct req_que **, struct rsp_que **);
202 static void qla2x00_mem_free(struct qla_hw_data *);
203 static void qla2x00_sp_free_dma(srb_t *);
204
205 /* -------------------------------------------------------------------------- */
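/* Allocate the per-adapter arrays of request and response queue pointers. */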
206 static int qla2x00_alloc_queues(struct qla_hw_data *ha)
207 {
208         ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
209                                 GFP_KERNEL);
210         if (!ha->req_q_map) {
211                 qla_printk(KERN_WARNING, ha,
212                         "Unable to allocate memory for request queue ptrs\n");
213                 goto fail_req_map;
214         }
215
216         ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
217                                 GFP_KERNEL);
218         if (!ha->rsp_q_map) {
219                 qla_printk(KERN_WARNING, ha,
220                         "Unable to allocate memory for response queue ptrs\n");
221                 goto fail_rsp_map;
222         }
223         set_bit(0, ha->rsp_qid_map);
224         set_bit(0, ha->req_qid_map);
225         return 1;
226
227 fail_rsp_map:
228         kfree(ha->req_q_map);
229         ha->req_q_map = NULL;
230 fail_req_map:
231         return -ENOMEM;
232 }
233
234 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
235 {
236         if (req && req->ring)
237                 dma_free_coherent(&ha->pdev->dev,
238                 (req->length + 1) * sizeof(request_t),
239                 req->ring, req->dma);
240
241         kfree(req);
242         req = NULL;
243 }
244
245 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
246 {
247         if (rsp && rsp->ring)
248                 dma_free_coherent(&ha->pdev->dev,
249                 (rsp->length + 1) * sizeof(response_t),
250                 rsp->ring, rsp->dma);
251
252         kfree(rsp);
253         rsp = NULL;
254 }
255
256 static void qla2x00_free_queues(struct qla_hw_data *ha)
257 {
258         struct req_que *req;
259         struct rsp_que *rsp;
260         int cnt;
261
262         for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
263                 req = ha->req_q_map[cnt];
264                 qla2x00_free_req_que(ha, req);
265         }
266         kfree(ha->req_q_map);
267         ha->req_q_map = NULL;
268
269         for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
270                 rsp = ha->rsp_q_map[cnt];
271                 qla2x00_free_rsp_que(ha, rsp);
272         }
273         kfree(ha->rsp_q_map);
274         ha->rsp_q_map = NULL;
275 }
276
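/*
 * Set up multi-queue (CPU affinity) operation when the firmware is
 * multi-queue capable and ql2xmultique_tag is set.
 */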
277 static int qla25xx_setup_mode(struct scsi_qla_host *vha)
278 {
279         uint16_t options = 0;
280         int ques, req, ret;
281         struct qla_hw_data *ha = vha->hw;
282
283         if (!(ha->fw_attributes & BIT_6)) {
284                 qla_printk(KERN_INFO, ha,
285                         "Firmware is not multi-queue capable\n");
286                 goto fail;
287         }
288         if (ql2xmultique_tag) {
289                 /* create a request queue for IO */
290                 options |= BIT_7;
291                 req = qla25xx_create_req_que(ha, options, 0, 0, -1,
292                         QLA_DEFAULT_QUE_QOS);
293                 if (!req) {
294                         qla_printk(KERN_WARNING, ha,
295                                 "Can't create request queue\n");
296                         goto fail;
297                 }
298                 ha->wq = create_workqueue("qla2xxx_wq");
299                 vha->req = ha->req_q_map[req];
300                 options |= BIT_1;
301                 for (ques = 1; ques < ha->max_rsp_queues; ques++) {
302                         ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
303                         if (!ret) {
304                                 qla_printk(KERN_WARNING, ha,
305                                         "Response Queue create failed\n");
306                                 goto fail2;
307                         }
308                 }
309                 ha->flags.cpu_affinity_enabled = 1;
310
311                 DEBUG2(qla_printk(KERN_INFO, ha,
312                         "CPU affinity mode enabled, no. of response"
313                         " queues:%d, no. of request queues:%d\n",
314                         ha->max_rsp_queues, ha->max_req_queues));
315         }
316         return 0;
317 fail2:
318         qla25xx_delete_queues(vha);
319         destroy_workqueue(ha->wq);
320         ha->wq = NULL;
321 fail:
322         ha->mqenable = 0;
323         kfree(ha->req_q_map);
324         kfree(ha->rsp_q_map);
325         ha->max_req_queues = ha->max_rsp_queues = 1;
326         return 1;
327 }
328
329 static char *
330 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
331 {
332         struct qla_hw_data *ha = vha->hw;
333         static char *pci_bus_modes[] = {
334                 "33", "66", "100", "133",
335         };
336         uint16_t pci_bus;
337
338         strcpy(str, "PCI");
339         pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
340         if (pci_bus) {
341                 strcat(str, "-X (");
342                 strcat(str, pci_bus_modes[pci_bus]);
343         } else {
344                 pci_bus = (ha->pci_attr & BIT_8) >> 8;
345                 strcat(str, " (");
346                 strcat(str, pci_bus_modes[pci_bus]);
347         }
348         strcat(str, " MHz)");
349
350         return (str);
351 }
352
353 static char *
354 qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
355 {
356         static char *pci_bus_modes[] = { "33", "66", "100", "133", };
357         struct qla_hw_data *ha = vha->hw;
358         uint32_t pci_bus;
359         int pcie_reg;
360
361         pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
362         if (pcie_reg) {
363                 char lwstr[6];
364                 uint16_t pcie_lstat, lspeed, lwidth;
365
366                 pcie_reg += 0x12;
367                 pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
368                 lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
369                 lwidth = (pcie_lstat &
370                     (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
371
372                 strcpy(str, "PCIe (");
373                 if (lspeed == 1)
374                         strcat(str, "2.5GT/s ");
375                 else if (lspeed == 2)
376                         strcat(str, "5.0GT/s ");
377                 else
378                         strcat(str, "<unknown> ");
379                 snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
380                 strcat(str, lwstr);
381
382                 return str;
383         }
384
385         strcpy(str, "PCI");
386         pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
387         if (pci_bus == 0 || pci_bus == 8) {
388                 strcat(str, " (");
389                 strcat(str, pci_bus_modes[pci_bus >> 3]);
390         } else {
391                 strcat(str, "-X ");
392                 if (pci_bus & BIT_2)
393                         strcat(str, "Mode 2");
394                 else
395                         strcat(str, "Mode 1");
396                 strcat(str, " (");
397                 strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
398         }
399         strcat(str, " MHz)");
400
401         return str;
402 }
403
404 static char *
405 qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
406 {
407         char un_str[10];
408         struct qla_hw_data *ha = vha->hw;
409
410         sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
411             ha->fw_minor_version,
412             ha->fw_subminor_version);
413
414         if (ha->fw_attributes & BIT_9) {
415                 strcat(str, "FLX");
416                 return (str);
417         }
418
419         switch (ha->fw_attributes & 0xFF) {
420         case 0x7:
421                 strcat(str, "EF");
422                 break;
423         case 0x17:
424                 strcat(str, "TP");
425                 break;
426         case 0x37:
427                 strcat(str, "IP");
428                 break;
429         case 0x77:
430                 strcat(str, "VI");
431                 break;
432         default:
433                 sprintf(un_str, "(%x)", ha->fw_attributes);
434                 strcat(str, un_str);
435                 break;
436         }
437         if (ha->fw_attributes & 0x100)
438                 strcat(str, "X");
439
440         return (str);
441 }
442
443 static char *
444 qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
445 {
446         struct qla_hw_data *ha = vha->hw;
447
448         sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
449             ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
450         return str;
451 }
452
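/* Allocate an SRB from the mempool and bind it to the command and port. */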
453 static inline srb_t *
454 qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
455     struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
456 {
457         srb_t *sp;
458         struct qla_hw_data *ha = vha->hw;
459
460         sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
461         if (!sp)
462                 return sp;
463
464         sp->fcport = fcport;
465         sp->cmd = cmd;
466         sp->flags = 0;
467         CMD_SP(cmd) = (void *)sp;
468         cmd->scsi_done = done;
469         sp->ctx = NULL;
470
471         return sp;
472 }
473
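/*
 * queuecommand entry point: validate the port state, build an SRB for
 * the command and hand it to the ISP-specific start_scsi routine.
 */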
474 static int
475 qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
476 {
477         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
478         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
479         struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
480         struct qla_hw_data *ha = vha->hw;
481         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
482         srb_t *sp;
483         int rval;
484
485         if (ha->flags.eeh_busy) {
486                 if (ha->flags.pci_channel_io_perm_failure)
487                         cmd->result = DID_NO_CONNECT << 16;
488                 else
489                         cmd->result = DID_REQUEUE << 16;
490                 goto qc24_fail_command;
491         }
492
493         rval = fc_remote_port_chkready(rport);
494         if (rval) {
495                 cmd->result = rval;
496                 goto qc24_fail_command;
497         }
498
499         /* Close window on fcport/rport state-transitioning. */
500         if (fcport->drport)
501                 goto qc24_target_busy;
502
503         if (atomic_read(&fcport->state) != FCS_ONLINE) {
504                 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
505                     atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
506                         cmd->result = DID_NO_CONNECT << 16;
507                         goto qc24_fail_command;
508                 }
509                 goto qc24_target_busy;
510         }
511
512         spin_unlock_irq(vha->host->host_lock);
513
514         sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
515         if (!sp)
516                 goto qc24_host_busy_lock;
517
518         rval = ha->isp_ops->start_scsi(sp);
519         if (rval != QLA_SUCCESS)
520                 goto qc24_host_busy_free_sp;
521
522         spin_lock_irq(vha->host->host_lock);
523
524         return 0;
525
526 qc24_host_busy_free_sp:
527         qla2x00_sp_free_dma(sp);
528         mempool_free(sp, ha->srb_mempool);
529
530 qc24_host_busy_lock:
531         spin_lock_irq(vha->host->host_lock);
532         return SCSI_MLQUEUE_HOST_BUSY;
533
534 qc24_target_busy:
535         return SCSI_MLQUEUE_TARGET_BUSY;
536
537 qc24_fail_command:
538         done(cmd);
539
540         return 0;
541 }
542
543
544 /*
545  * qla2x00_eh_wait_on_command
546  *    Waits, up to a maximum time, for the command to be returned
547  *    by the firmware.
548  *
549  * Input:
550  *    cmd = Scsi Command to wait on.
551  *
552  * Return:
553  *    Not Found : 0
554  *    Found : 1
555  */
556 static int
557 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
558 {
559 #define ABORT_POLLING_PERIOD    1000
560 #define ABORT_WAIT_ITER         ((10 * 1000) / (ABORT_POLLING_PERIOD))
561         unsigned long wait_iter = ABORT_WAIT_ITER;
562         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
563         struct qla_hw_data *ha = vha->hw;
564         int ret = QLA_SUCCESS;
565
566         if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
567                 DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
568                 return ret;
569         }
570
571         while (CMD_SP(cmd) && wait_iter--) {
572                 msleep(ABORT_POLLING_PERIOD);
573         }
574         if (CMD_SP(cmd))
575                 ret = QLA_FUNCTION_FAILED;
576
577         return ret;
578 }
579
580 /*
581  * qla2x00_wait_for_hba_online
582  *    Wait until the HBA is online, going through at most
583  *    MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA is
584  *    finally disabled, i.e. marked offline.
585  *
586  * Input:
587  *     ha - pointer to host adapter structure
588  *
589  * Note:
590  *    This routine may sleep; release any spinlocks held
591  *    before calling it.
592  *
593  * Return:
594  *    Success (Adapter is online) : 0
595  *    Failed  (Adapter is offline/disabled) : 1
596  */
597 int
598 qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
599 {
600         int             return_status;
601         unsigned long   wait_online;
602         struct qla_hw_data *ha = vha->hw;
603         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
604
605         wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
606         while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
607             test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
608             test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
609             ha->dpc_active) && time_before(jiffies, wait_online)) {
610
611                 msleep(1000);
612         }
613         if (base_vha->flags.online)
614                 return_status = QLA_SUCCESS;
615         else
616                 return_status = QLA_FUNCTION_FAILED;
617
618         return (return_status);
619 }
620
621 int
622 qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
623 {
624         int             return_status;
625         unsigned long   wait_reset;
626         struct qla_hw_data *ha = vha->hw;
627         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
628
629         wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
630         while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
631             test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
632             test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
633             ha->dpc_active) && time_before(jiffies, wait_reset)) {
634
635                 msleep(1000);
636
637                 if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
638                     ha->flags.chip_reset_done)
639                         break;
640         }
641         if (ha->flags.chip_reset_done)
642                 return_status = QLA_SUCCESS;
643         else
644                 return_status = QLA_FUNCTION_FAILED;
645
646         return return_status;
647 }
648
649 /*
650  * qla2x00_wait_for_loop_ready
651  *    Wait up to MAX_LOOP_TIMEOUT (5 min) for the loop
652  *    to reach the LOOP_READY state.
653  * Input:
654  *     ha - pointer to host adapter structure
655  *
656  * Note:
657  *    This routine may sleep; release any spinlocks held
658  *    before calling it.
659  *
660  *
661  * Return:
662  *    Success (LOOP_READY) : 0
663  *    Failed  (LOOP_NOT_READY) : 1
664  */
665 static inline int
666 qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
667 {
668         int      return_status = QLA_SUCCESS;
669         unsigned long loop_timeout ;
670         struct qla_hw_data *ha = vha->hw;
671         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
672
673         /* wait for 5 min at the max for loop to be ready */
674         loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
675
676         while ((!atomic_read(&base_vha->loop_down_timer) &&
677             atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
678             atomic_read(&base_vha->loop_state) != LOOP_READY) {
679                 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
680                         return_status = QLA_FUNCTION_FAILED;
681                         break;
682                 }
683                 msleep(1000);
684                 if (time_after_eq(jiffies, loop_timeout)) {
685                         return_status = QLA_FUNCTION_FAILED;
686                         break;
687                 }
688         }
689         return (return_status);
690 }
691
692 /**************************************************************************
693 * qla2xxx_eh_abort
694 *
695 * Description:
696 *    The abort function will abort the specified command.
697 *
698 * Input:
699 *    cmd = Linux SCSI command packet to be aborted.
700 *
701 * Returns:
702 *    Either SUCCESS or FAILED.
703 *
704 * Note:
705 *    Only return FAILED if command not returned by firmware.
706 **************************************************************************/
707 static int
708 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 {
710         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
711         srb_t *sp;
712         int ret, i;
713         unsigned int id, lun;
714         unsigned long serial;
715         unsigned long flags;
716         int wait = 0;
717         struct qla_hw_data *ha = vha->hw;
718         struct req_que *req = vha->req;
719         srb_t *spt;
720
721         fc_block_scsi_eh(cmd);
722
723         if (!CMD_SP(cmd))
724                 return SUCCESS;
725
726         ret = SUCCESS;
727
728         id = cmd->device->id;
729         lun = cmd->device->lun;
730         serial = cmd->serial_number;
731         spt = (srb_t *) CMD_SP(cmd);
732         if (!spt)
733                 return SUCCESS;
734
735         /* Check the active list for the command. */
736         spin_lock_irqsave(&ha->hardware_lock, flags);
737         for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
738                 sp = req->outstanding_cmds[i];
739
740                 if (sp == NULL)
741                         continue;
742                 if (sp->ctx)
743                         continue;
744                 if (sp->cmd != cmd)
745                         continue;
746
747                 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
748                 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
749
750                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
751                 if (ha->isp_ops->abort_command(sp)) {
752                         DEBUG2(printk("%s(%ld): abort_command "
753                         "mbx failed.\n", __func__, vha->host_no));
754                         ret = FAILED;
755                 } else {
756                         DEBUG3(printk("%s(%ld): abort_command "
757                         "mbx success.\n", __func__, vha->host_no));
758                         wait = 1;
759                 }
760                 spin_lock_irqsave(&ha->hardware_lock, flags);
761                 break;
762         }
763         spin_unlock_irqrestore(&ha->hardware_lock, flags);
764
765         /* Wait for the command to be returned. */
766         if (wait) {
767                 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
768                         qla_printk(KERN_ERR, ha,
769                             "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
770                             "%x.\n", vha->host_no, id, lun, serial, ret);
771                         ret = FAILED;
772                 }
773         }
774
775         qla_printk(KERN_INFO, ha,
776             "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
777             vha->host_no, id, lun, wait, serial, ret);
778
779         return ret;
780 }
781
782 enum nexus_wait_type {
783         WAIT_HOST = 0,
784         WAIT_TARGET,
785         WAIT_LUN,
786 };
787
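/*
 * Wait for all outstanding commands on the given nexus (host, target
 * or lun) to be returned by the firmware.
 */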
788 static int
789 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
790         unsigned int l, srb_t *sp, enum nexus_wait_type type)
791 {
792         int cnt, match, status;
793         unsigned long flags;
794         struct qla_hw_data *ha = vha->hw;
795         struct req_que *req;
796
797         status = QLA_SUCCESS;
798         if (!sp)
799                 return status;
800
801         spin_lock_irqsave(&ha->hardware_lock, flags);
802         req = vha->req;
803         for (cnt = 1; status == QLA_SUCCESS &&
804                 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
805                 sp = req->outstanding_cmds[cnt];
806                 if (!sp)
807                         continue;
808                 if (sp->ctx)
809                         continue;
810                 if (vha->vp_idx != sp->fcport->vha->vp_idx)
811                         continue;
812                 match = 0;
813                 switch (type) {
814                 case WAIT_HOST:
815                         match = 1;
816                         break;
817                 case WAIT_TARGET:
818                         match = sp->cmd->device->id == t;
819                         break;
820                 case WAIT_LUN:
821                         match = (sp->cmd->device->id == t &&
822                                 sp->cmd->device->lun == l);
823                         break;
824                 }
825                 if (!match)
826                         continue;
827
828                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
829                 status = qla2x00_eh_wait_on_command(sp->cmd);
830                 spin_lock_irqsave(&ha->hardware_lock, flags);
831         }
832         spin_unlock_irqrestore(&ha->hardware_lock, flags);
833
834         return status;
835 }
836
837 static char *reset_errors[] = {
838         "HBA not online",
839         "HBA not ready",
840         "Task management failed",
841         "Waiting for command completions",
842 };
843
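/* Common helper for the device- and target-reset error handlers. */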
844 static int
845 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
846     struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
847 {
848         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
849         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
850         int err;
851
852         fc_block_scsi_eh(cmd);
853
854         if (!fcport)
855                 return FAILED;
856
857         qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
858             vha->host_no, cmd->device->id, cmd->device->lun, name);
859
860         err = 0;
861         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
862                 goto eh_reset_failed;
863         err = 1;
864         if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
865                 goto eh_reset_failed;
866         err = 2;
867         if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
868                 != QLA_SUCCESS)
869                 goto eh_reset_failed;
870         err = 3;
871         if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
872             cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
873                 goto eh_reset_failed;
874
875         qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
876             vha->host_no, cmd->device->id, cmd->device->lun, name);
877
878         return SUCCESS;
879
880  eh_reset_failed:
881         qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
882             , vha->host_no, cmd->device->id, cmd->device->lun, name,
883             reset_errors[err]);
884         return FAILED;
885 }
886
887 static int
888 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
889 {
890         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
891         struct qla_hw_data *ha = vha->hw;
892
893         return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
894             ha->isp_ops->lun_reset);
895 }
896
897 static int
898 qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
899 {
900         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
901         struct qla_hw_data *ha = vha->hw;
902
903         return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
904             ha->isp_ops->target_reset);
905 }
906
907 /**************************************************************************
908 * qla2xxx_eh_bus_reset
909 *
910 * Description:
911 *    The bus reset function will reset the bus and abort any executing
912 *    commands.
913 *
914 * Input:
915 *    cmd = Linux SCSI command packet of the command that caused the
916 *          bus reset.
917 *
918 * Returns:
919 *    SUCCESS/FAILURE (defined as macro in scsi.h).
920 *
921 **************************************************************************/
922 static int
923 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
924 {
925         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
926         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
927         int ret = FAILED;
928         unsigned int id, lun;
929         unsigned long serial;
930         srb_t *sp = (srb_t *) CMD_SP(cmd);
931
932         fc_block_scsi_eh(cmd);
933
934         id = cmd->device->id;
935         lun = cmd->device->lun;
936         serial = cmd->serial_number;
937
938         if (!fcport)
939                 return ret;
940
941         qla_printk(KERN_INFO, vha->hw,
942             "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
943
944         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
945                 DEBUG2(printk("%s failed: board disabled\n", __func__));
946                 goto eh_bus_reset_done;
947         }
948
949         if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
950                 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
951                         ret = SUCCESS;
952         }
953         if (ret == FAILED)
954                 goto eh_bus_reset_done;
955
956         /* Flush outstanding commands. */
957         if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
958             QLA_SUCCESS)
959                 ret = FAILED;
960
961 eh_bus_reset_done:
962         qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
963             (ret == FAILED) ? "failed" : "succeeded");
964
965         return ret;
966 }
967
968 /**************************************************************************
969 * qla2xxx_eh_host_reset
970 *
971 * Description:
972 *    The reset function will reset the Adapter.
973 *
974 * Input:
975 *      cmd = Linux SCSI command packet of the command that caused the
976 *            adapter reset.
977 *
978 * Returns:
979 *      Either SUCCESS or FAILED.
980 *
981 * Note:
982 **************************************************************************/
983 static int
984 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
985 {
986         scsi_qla_host_t *vha = shost_priv(cmd->device->host);
987         fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
988         struct qla_hw_data *ha = vha->hw;
989         int ret = FAILED;
990         unsigned int id, lun;
991         unsigned long serial;
992         srb_t *sp = (srb_t *) CMD_SP(cmd);
993         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
994
995         fc_block_scsi_eh(cmd);
996
997         id = cmd->device->id;
998         lun = cmd->device->lun;
999         serial = cmd->serial_number;
1000
1001         if (!fcport)
1002                 return ret;
1003
1004         qla_printk(KERN_INFO, ha,
1005             "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
1006
1007         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1008                 goto eh_host_reset_lock;
1009
1010         /*
1011          * Fixme: the dpc thread may be active and processing
1012          * loop_resync, so wait a while for it to complete before
1013          * issuing the big hammer.  Otherwise it may cause I/O
1014          * failures, as the big hammer marks the devices as lost,
1015          * kicking off the port_down_timer while the dpc thread is
1016          * stuck waiting for the mailbox to complete.
1017          */
1018         qla2x00_wait_for_loop_ready(vha);
1019         if (vha != base_vha) {
1020                 if (qla2x00_vp_abort_isp(vha))
1021                         goto eh_host_reset_lock;
1022         } else {
1023                 if (ha->wq)
1024                         flush_workqueue(ha->wq);
1025
1026                 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1027                 if (qla2x00_abort_isp(base_vha)) {
1028                         clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1029                         /* failed. schedule dpc to try */
1030                         set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1031
1032                         if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1033                                 goto eh_host_reset_lock;
1034                 }
1035                 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1036         }
1037
1038         /* Waiting for command to be returned to OS.*/
1039         if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
1040                 QLA_SUCCESS)
1041                 ret = SUCCESS;
1042
1043 eh_host_reset_lock:
1044         qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1045             (ret == FAILED) ? "failed" : "succeeded");
1046
1047         return ret;
1048 }
1049
1050 /*
1051 * qla2x00_loop_reset
1052 *      Issue loop reset.
1053 *
1054 * Input:
1055 *      ha = adapter block pointer.
1056 *
1057 * Returns:
1058 *      0 = success
1059 */
1060 int
1061 qla2x00_loop_reset(scsi_qla_host_t *vha)
1062 {
1063         int ret;
1064         struct fc_port *fcport;
1065         struct qla_hw_data *ha = vha->hw;
1066
1067         if (ha->flags.enable_target_reset) {
1068                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1069                         if (fcport->port_type != FCT_TARGET)
1070                                 continue;
1071
1072                         ret = ha->isp_ops->target_reset(fcport, 0, 0);
1073                         if (ret != QLA_SUCCESS) {
1074                                 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1075                                     "target_reset=%d d_id=%x.\n", __func__,
1076                                     vha->host_no, ret, fcport->d_id.b24));
1077                         }
1078                 }
1079         }
1080
1081         if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
1082                 ret = qla2x00_full_login_lip(vha);
1083                 if (ret != QLA_SUCCESS) {
1084                         DEBUG2_3(printk("%s(%ld): failed: "
1085                             "full_login_lip=%d.\n", __func__, vha->host_no,
1086                             ret));
1087                 }
1088                 atomic_set(&vha->loop_state, LOOP_DOWN);
1089                 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1090                 qla2x00_mark_all_devices_lost(vha, 0);
1091                 qla2x00_wait_for_loop_ready(vha);
1092         }
1093
1094         if (ha->flags.enable_lip_reset) {
1095                 ret = qla2x00_lip_reset(vha);
1096                 if (ret != QLA_SUCCESS) {
1097                         DEBUG2_3(printk("%s(%ld): failed: "
1098                             "lip_reset=%d.\n", __func__, vha->host_no, ret));
1099                 } else
1100                         qla2x00_wait_for_loop_ready(vha);
1101         }
1102
1103         /* Issue marker command only when we are going to start the I/O */
1104         vha->marker_needed = 1;
1105
1106         return QLA_SUCCESS;
1107 }
1108
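/* Return every outstanding command on all request queues with the given result. */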
1109 void
1110 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1111 {
1112         int que, cnt;
1113         unsigned long flags;
1114         srb_t *sp;
1115         struct srb_ctx *ctx;
1116         struct qla_hw_data *ha = vha->hw;
1117         struct req_que *req;
1118
1119         spin_lock_irqsave(&ha->hardware_lock, flags);
1120         for (que = 0; que < ha->max_req_queues; que++) {
1121                 req = ha->req_q_map[que];
1122                 if (!req)
1123                         continue;
1124                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1125                         sp = req->outstanding_cmds[cnt];
1126                         if (sp) {
1127                                 req->outstanding_cmds[cnt] = NULL;
1128                                 if (!sp->ctx) {
1129                                         sp->cmd->result = res;
1130                                         qla2x00_sp_compl(ha, sp);
1131                                 } else {
1132                                         ctx = sp->ctx;
1133                                         if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
1134                                                 del_timer_sync(&ctx->timer);
1135                                                 ctx->free(sp);
1136                                         } else {
1137                                                 struct srb_bsg *sp_bsg = (struct srb_bsg *)sp->ctx;
1138                                                 if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
1139                                                         kfree(sp->fcport);
1140                                                 sp_bsg->bsg_job->req->errors = 0;
1141                                                 sp_bsg->bsg_job->reply->result = res;
1142                                                 sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
1143                                                 kfree(sp->ctx);
1144                                                 mempool_free(sp, ha->srb_mempool);
1145                                         }
1146                                 }
1147                         }
1148                 }
1149         }
1150         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1151 }
1152
1153 static int
1154 qla2xxx_slave_alloc(struct scsi_device *sdev)
1155 {
1156         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1157
1158         if (!rport || fc_remote_port_chkready(rport))
1159                 return -ENXIO;
1160
1161         sdev->hostdata = *(fc_port_t **)rport->dd_data;
1162
1163         return 0;
1164 }
1165
1166 static int
1167 qla2xxx_slave_configure(struct scsi_device *sdev)
1168 {
1169         scsi_qla_host_t *vha = shost_priv(sdev->host);
1170         struct qla_hw_data *ha = vha->hw;
1171         struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1172         struct req_que *req = vha->req;
1173
1174         if (sdev->tagged_supported)
1175                 scsi_activate_tcq(sdev, req->max_q_depth);
1176         else
1177                 scsi_deactivate_tcq(sdev, req->max_q_depth);
1178
1179         rport->dev_loss_tmo = ha->port_down_retry_count;
1180
1181         return 0;
1182 }
1183
1184 static void
1185 qla2xxx_slave_destroy(struct scsi_device *sdev)
1186 {
1187         sdev->hostdata = NULL;
1188 }
1189
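/* Ramp the queue depth down after a device reports QUEUE FULL. */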
1190 static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
1191 {
1192         fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
1193
1194         if (!scsi_track_queue_full(sdev, qdepth))
1195                 return;
1196
1197         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
1198                 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
1199                 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1200                 sdev->queue_depth));
1201 }
1202
1203 static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
1204 {
1205         fc_port_t *fcport = sdev->hostdata;
1206         struct scsi_qla_host *vha = fcport->vha;
1207         struct qla_hw_data *ha = vha->hw;
1208         struct req_que *req = NULL;
1209
1210         req = vha->req;
1211         if (!req)
1212                 return;
1213
1214         if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
1215                 return;
1216
1217         if (sdev->ordered_tags)
1218                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
1219         else
1220                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
1221
1222         DEBUG2(qla_printk(KERN_INFO, ha,
1223                "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
1224                fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
1225                sdev->queue_depth));
1226 }
1227
1228 static int
1229 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
1230 {
1231         switch (reason) {
1232         case SCSI_QDEPTH_DEFAULT:
1233                 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
1234                 break;
1235         case SCSI_QDEPTH_QFULL:
1236                 qla2x00_handle_queue_full(sdev, qdepth);
1237                 break;
1238         case SCSI_QDEPTH_RAMP_UP:
1239                 qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
1240                 break;
1241         default:
1242                 return -EOPNOTSUPP;
1243         }
1244
1245         return sdev->queue_depth;
1246 }
1247
1248 static int
1249 qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1250 {
1251         if (sdev->tagged_supported) {
1252                 scsi_set_tag_type(sdev, tag_type);
1253                 if (tag_type)
1254                         scsi_activate_tcq(sdev, sdev->queue_depth);
1255                 else
1256                         scsi_deactivate_tcq(sdev, sdev->queue_depth);
1257         } else
1258                 tag_type = 0;
1259
1260         return tag_type;
1261 }
1262
1263 /**
1264  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1265  * @ha: HA context
1266  *
1267  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1268  * supported addressing method.
1269  */
1270 static void
1271 qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1272 {
1273         /* Assume a 32bit DMA mask. */
1274         ha->flags.enable_64bit_addressing = 0;
1275
1276         if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1277                 /* Any upper-dword bits set? */
1278                 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1279                     !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1280                         /* Ok, a 64bit DMA mask is applicable. */
1281                         ha->flags.enable_64bit_addressing = 1;
1282                         ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1283                         ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1284                         return;
1285                 }
1286         }
1287
1288         dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1289         pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1290 }
1291
1292 static void
1293 qla2x00_enable_intrs(struct qla_hw_data *ha)
1294 {
1295         unsigned long flags = 0;
1296         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1297
1298         spin_lock_irqsave(&ha->hardware_lock, flags);
1299         ha->interrupts_on = 1;
1300         /* enable risc and host interrupts */
1301         WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1302         RD_REG_WORD(&reg->ictrl);
1303         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1304
1305 }
1306
1307 static void
1308 qla2x00_disable_intrs(struct qla_hw_data *ha)
1309 {
1310         unsigned long flags = 0;
1311         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1312
1313         spin_lock_irqsave(&ha->hardware_lock, flags);
1314         ha->interrupts_on = 0;
1315         /* disable risc and host interrupts */
1316         WRT_REG_WORD(&reg->ictrl, 0);
1317         RD_REG_WORD(&reg->ictrl);
1318         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1319 }
1320
1321 static void
1322 qla24xx_enable_intrs(struct qla_hw_data *ha)
1323 {
1324         unsigned long flags = 0;
1325         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1326
1327         spin_lock_irqsave(&ha->hardware_lock, flags);
1328         ha->interrupts_on = 1;
1329         WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1330         RD_REG_DWORD(&reg->ictrl);
1331         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1332 }
1333
1334 static void
1335 qla24xx_disable_intrs(struct qla_hw_data *ha)
1336 {
1337         unsigned long flags = 0;
1338         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1339
1340         if (IS_NOPOLLING_TYPE(ha))
1341                 return;
1342         spin_lock_irqsave(&ha->hardware_lock, flags);
1343         ha->interrupts_on = 0;
1344         WRT_REG_DWORD(&reg->ictrl, 0);
1345         RD_REG_DWORD(&reg->ictrl);
1346         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1347 }
1348
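/* Per-ISP-generation hardware operation tables. */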
1349 static struct isp_operations qla2100_isp_ops = {
1350         .pci_config             = qla2100_pci_config,
1351         .reset_chip             = qla2x00_reset_chip,
1352         .chip_diag              = qla2x00_chip_diag,
1353         .config_rings           = qla2x00_config_rings,
1354         .reset_adapter          = qla2x00_reset_adapter,
1355         .nvram_config           = qla2x00_nvram_config,
1356         .update_fw_options      = qla2x00_update_fw_options,
1357         .load_risc              = qla2x00_load_risc,
1358         .pci_info_str           = qla2x00_pci_info_str,
1359         .fw_version_str         = qla2x00_fw_version_str,
1360         .intr_handler           = qla2100_intr_handler,
1361         .enable_intrs           = qla2x00_enable_intrs,
1362         .disable_intrs          = qla2x00_disable_intrs,
1363         .abort_command          = qla2x00_abort_command,
1364         .target_reset           = qla2x00_abort_target,
1365         .lun_reset              = qla2x00_lun_reset,
1366         .fabric_login           = qla2x00_login_fabric,
1367         .fabric_logout          = qla2x00_fabric_logout,
1368         .calc_req_entries       = qla2x00_calc_iocbs_32,
1369         .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1370         .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1371         .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1372         .read_nvram             = qla2x00_read_nvram_data,
1373         .write_nvram            = qla2x00_write_nvram_data,
1374         .fw_dump                = qla2100_fw_dump,
1375         .beacon_on              = NULL,
1376         .beacon_off             = NULL,
1377         .beacon_blink           = NULL,
1378         .read_optrom            = qla2x00_read_optrom_data,
1379         .write_optrom           = qla2x00_write_optrom_data,
1380         .get_flash_version      = qla2x00_get_flash_version,
1381         .start_scsi             = qla2x00_start_scsi,
1382 };
1383
1384 static struct isp_operations qla2300_isp_ops = {
1385         .pci_config             = qla2300_pci_config,
1386         .reset_chip             = qla2x00_reset_chip,
1387         .chip_diag              = qla2x00_chip_diag,
1388         .config_rings           = qla2x00_config_rings,
1389         .reset_adapter          = qla2x00_reset_adapter,
1390         .nvram_config           = qla2x00_nvram_config,
1391         .update_fw_options      = qla2x00_update_fw_options,
1392         .load_risc              = qla2x00_load_risc,
1393         .pci_info_str           = qla2x00_pci_info_str,
1394         .fw_version_str         = qla2x00_fw_version_str,
1395         .intr_handler           = qla2300_intr_handler,
1396         .enable_intrs           = qla2x00_enable_intrs,
1397         .disable_intrs          = qla2x00_disable_intrs,
1398         .abort_command          = qla2x00_abort_command,
1399         .target_reset           = qla2x00_abort_target,
1400         .lun_reset              = qla2x00_lun_reset,
1401         .fabric_login           = qla2x00_login_fabric,
1402         .fabric_logout          = qla2x00_fabric_logout,
1403         .calc_req_entries       = qla2x00_calc_iocbs_32,
1404         .build_iocbs            = qla2x00_build_scsi_iocbs_32,
1405         .prep_ms_iocb           = qla2x00_prep_ms_iocb,
1406         .prep_ms_fdmi_iocb      = qla2x00_prep_ms_fdmi_iocb,
1407         .read_nvram             = qla2x00_read_nvram_data,
1408         .write_nvram            = qla2x00_write_nvram_data,
1409         .fw_dump                = qla2300_fw_dump,
1410         .beacon_on              = qla2x00_beacon_on,
1411         .beacon_off             = qla2x00_beacon_off,
1412         .beacon_blink           = qla2x00_beacon_blink,
1413         .read_optrom            = qla2x00_read_optrom_data,
1414         .write_optrom           = qla2x00_write_optrom_data,
1415         .get_flash_version      = qla2x00_get_flash_version,
1416         .start_scsi             = qla2x00_start_scsi,
1417 };
1418
1419 static struct isp_operations qla24xx_isp_ops = {
1420         .pci_config             = qla24xx_pci_config,
1421         .reset_chip             = qla24xx_reset_chip,
1422         .chip_diag              = qla24xx_chip_diag,
1423         .config_rings           = qla24xx_config_rings,
1424         .reset_adapter          = qla24xx_reset_adapter,
1425         .nvram_config           = qla24xx_nvram_config,
1426         .update_fw_options      = qla24xx_update_fw_options,
1427         .load_risc              = qla24xx_load_risc,
1428         .pci_info_str           = qla24xx_pci_info_str,
1429         .fw_version_str         = qla24xx_fw_version_str,
1430         .intr_handler           = qla24xx_intr_handler,
1431         .enable_intrs           = qla24xx_enable_intrs,
1432         .disable_intrs          = qla24xx_disable_intrs,
1433         .abort_command          = qla24xx_abort_command,
1434         .target_reset           = qla24xx_abort_target,
1435         .lun_reset              = qla24xx_lun_reset,
1436         .fabric_login           = qla24xx_login_fabric,
1437         .fabric_logout          = qla24xx_fabric_logout,
1438         .calc_req_entries       = NULL,
1439         .build_iocbs            = NULL,
1440         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1441         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1442         .read_nvram             = qla24xx_read_nvram_data,
1443         .write_nvram            = qla24xx_write_nvram_data,
1444         .fw_dump                = qla24xx_fw_dump,
1445         .beacon_on              = qla24xx_beacon_on,
1446         .beacon_off             = qla24xx_beacon_off,
1447         .beacon_blink           = qla24xx_beacon_blink,
1448         .read_optrom            = qla24xx_read_optrom_data,
1449         .write_optrom           = qla24xx_write_optrom_data,
1450         .get_flash_version      = qla24xx_get_flash_version,
1451         .start_scsi             = qla24xx_start_scsi,
1452 };
1453
1454 static struct isp_operations qla25xx_isp_ops = {
1455         .pci_config             = qla25xx_pci_config,
1456         .reset_chip             = qla24xx_reset_chip,
1457         .chip_diag              = qla24xx_chip_diag,
1458         .config_rings           = qla24xx_config_rings,
1459         .reset_adapter          = qla24xx_reset_adapter,
1460         .nvram_config           = qla24xx_nvram_config,
1461         .update_fw_options      = qla24xx_update_fw_options,
1462         .load_risc              = qla24xx_load_risc,
1463         .pci_info_str           = qla24xx_pci_info_str,
1464         .fw_version_str         = qla24xx_fw_version_str,
1465         .intr_handler           = qla24xx_intr_handler,
1466         .enable_intrs           = qla24xx_enable_intrs,
1467         .disable_intrs          = qla24xx_disable_intrs,
1468         .abort_command          = qla24xx_abort_command,
1469         .target_reset           = qla24xx_abort_target,
1470         .lun_reset              = qla24xx_lun_reset,
1471         .fabric_login           = qla24xx_login_fabric,
1472         .fabric_logout          = qla24xx_fabric_logout,
1473         .calc_req_entries       = NULL,
1474         .build_iocbs            = NULL,
1475         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1476         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1477         .read_nvram             = qla25xx_read_nvram_data,
1478         .write_nvram            = qla25xx_write_nvram_data,
1479         .fw_dump                = qla25xx_fw_dump,
1480         .beacon_on              = qla24xx_beacon_on,
1481         .beacon_off             = qla24xx_beacon_off,
1482         .beacon_blink           = qla24xx_beacon_blink,
1483         .read_optrom            = qla25xx_read_optrom_data,
1484         .write_optrom           = qla24xx_write_optrom_data,
1485         .get_flash_version      = qla24xx_get_flash_version,
1486         .start_scsi             = qla24xx_start_scsi,
1487 };
1488
1489 static struct isp_operations qla81xx_isp_ops = {
1490         .pci_config             = qla25xx_pci_config,
1491         .reset_chip             = qla24xx_reset_chip,
1492         .chip_diag              = qla24xx_chip_diag,
1493         .config_rings           = qla24xx_config_rings,
1494         .reset_adapter          = qla24xx_reset_adapter,
1495         .nvram_config           = qla81xx_nvram_config,
1496         .update_fw_options      = qla81xx_update_fw_options,
1497         .load_risc              = qla81xx_load_risc,
1498         .pci_info_str           = qla24xx_pci_info_str,
1499         .fw_version_str         = qla24xx_fw_version_str,
1500         .intr_handler           = qla24xx_intr_handler,
1501         .enable_intrs           = qla24xx_enable_intrs,
1502         .disable_intrs          = qla24xx_disable_intrs,
1503         .abort_command          = qla24xx_abort_command,
1504         .target_reset           = qla24xx_abort_target,
1505         .lun_reset              = qla24xx_lun_reset,
1506         .fabric_login           = qla24xx_login_fabric,
1507         .fabric_logout          = qla24xx_fabric_logout,
1508         .calc_req_entries       = NULL,
1509         .build_iocbs            = NULL,
1510         .prep_ms_iocb           = qla24xx_prep_ms_iocb,
1511         .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
1512         .read_nvram             = NULL,
1513         .write_nvram            = NULL,
1514         .fw_dump                = qla81xx_fw_dump,
1515         .beacon_on              = qla24xx_beacon_on,
1516         .beacon_off             = qla24xx_beacon_off,
1517         .beacon_blink           = qla24xx_beacon_blink,
1518         .read_optrom            = qla25xx_read_optrom_data,
1519         .write_optrom           = qla24xx_write_optrom_data,
1520         .get_flash_version      = qla24xx_get_flash_version,
1521         .start_scsi             = qla24xx_start_scsi,
1522 };
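/*
 * Note: the ISP24xx, ISP25xx and ISP81xx ops tables above share most of the
 * FWI2 (qla24xx_*) handlers; they differ mainly in their PCI config, NVRAM,
 * option-ROM, firmware-load and firmware-dump entries, and the ISP81xx table
 * provides no direct NVRAM read/write handlers at all.
 */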
1523
1524 static inline void
1525 qla2x00_set_isp_flags(struct qla_hw_data *ha)
1526 {
1527         ha->device_type = DT_EXTENDED_IDS;
1528         switch (ha->pdev->device) {
1529         case PCI_DEVICE_ID_QLOGIC_ISP2100:
1530                 ha->device_type |= DT_ISP2100;
1531                 ha->device_type &= ~DT_EXTENDED_IDS;
1532                 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1533                 break;
1534         case PCI_DEVICE_ID_QLOGIC_ISP2200:
1535                 ha->device_type |= DT_ISP2200;
1536                 ha->device_type &= ~DT_EXTENDED_IDS;
1537                 ha->fw_srisc_address = RISC_START_ADDRESS_2100;
1538                 break;
1539         case PCI_DEVICE_ID_QLOGIC_ISP2300:
1540                 ha->device_type |= DT_ISP2300;
1541                 ha->device_type |= DT_ZIO_SUPPORTED;
1542                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1543                 break;
1544         case PCI_DEVICE_ID_QLOGIC_ISP2312:
1545                 ha->device_type |= DT_ISP2312;
1546                 ha->device_type |= DT_ZIO_SUPPORTED;
1547                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1548                 break;
1549         case PCI_DEVICE_ID_QLOGIC_ISP2322:
1550                 ha->device_type |= DT_ISP2322;
1551                 ha->device_type |= DT_ZIO_SUPPORTED;
1552                 if (ha->pdev->subsystem_vendor == 0x1028 &&
1553                     ha->pdev->subsystem_device == 0x0170)
1554                         ha->device_type |= DT_OEM_001;
1555                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1556                 break;
1557         case PCI_DEVICE_ID_QLOGIC_ISP6312:
1558                 ha->device_type |= DT_ISP6312;
1559                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1560                 break;
1561         case PCI_DEVICE_ID_QLOGIC_ISP6322:
1562                 ha->device_type |= DT_ISP6322;
1563                 ha->fw_srisc_address = RISC_START_ADDRESS_2300;
1564                 break;
1565         case PCI_DEVICE_ID_QLOGIC_ISP2422:
1566                 ha->device_type |= DT_ISP2422;
1567                 ha->device_type |= DT_ZIO_SUPPORTED;
1568                 ha->device_type |= DT_FWI2;
1569                 ha->device_type |= DT_IIDMA;
1570                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1571                 break;
1572         case PCI_DEVICE_ID_QLOGIC_ISP2432:
1573                 ha->device_type |= DT_ISP2432;
1574                 ha->device_type |= DT_ZIO_SUPPORTED;
1575                 ha->device_type |= DT_FWI2;
1576                 ha->device_type |= DT_IIDMA;
1577                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1578                 break;
1579         case PCI_DEVICE_ID_QLOGIC_ISP8432:
1580                 ha->device_type |= DT_ISP8432;
1581                 ha->device_type |= DT_ZIO_SUPPORTED;
1582                 ha->device_type |= DT_FWI2;
1583                 ha->device_type |= DT_IIDMA;
1584                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1585                 break;
1586         case PCI_DEVICE_ID_QLOGIC_ISP5422:
1587                 ha->device_type |= DT_ISP5422;
1588                 ha->device_type |= DT_FWI2;
1589                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1590                 break;
1591         case PCI_DEVICE_ID_QLOGIC_ISP5432:
1592                 ha->device_type |= DT_ISP5432;
1593                 ha->device_type |= DT_FWI2;
1594                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1595                 break;
1596         case PCI_DEVICE_ID_QLOGIC_ISP2532:
1597                 ha->device_type |= DT_ISP2532;
1598                 ha->device_type |= DT_ZIO_SUPPORTED;
1599                 ha->device_type |= DT_FWI2;
1600                 ha->device_type |= DT_IIDMA;
1601                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1602                 break;
1603         case PCI_DEVICE_ID_QLOGIC_ISP8001:
1604                 ha->device_type |= DT_ISP8001;
1605                 ha->device_type |= DT_ZIO_SUPPORTED;
1606                 ha->device_type |= DT_FWI2;
1607                 ha->device_type |= DT_IIDMA;
1608                 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
1609                 break;
1610         }
1611
1612         /* Get adapter physical port no from interrupt pin register. */
1613         pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
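        /*
         * An odd interrupt-pin value (e.g. INTA#) is presumably routed to
         * function 0 on these dual-function adapters, which is why it is
         * used below to flag physical port 0.
         */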
1614         if (ha->port_no & 1)
1615                 ha->flags.port0 = 1;
1616         else
1617                 ha->flags.port0 = 0;
1618 }
1619
1620 static int
1621 qla2x00_iospace_config(struct qla_hw_data *ha)
1622 {
1623         resource_size_t pio;
1624         uint16_t msix;
1625         int cpus;
1626
1627         if (pci_request_selected_regions(ha->pdev, ha->bars,
1628             QLA2XXX_DRIVER_NAME)) {
1629                 qla_printk(KERN_WARNING, ha,
1630                     "Failed to reserve PIO/MMIO regions (%s)\n",
1631                     pci_name(ha->pdev));
1632
1633                 goto iospace_error_exit;
1634         }
1635         if (!(ha->bars & 1))
1636                 goto skip_pio;
1637
1638         /* We only need PIO for Flash operations on ISP2312 v2 chips. */
1639         pio = pci_resource_start(ha->pdev, 0);
1640         if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
1641                 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1642                         qla_printk(KERN_WARNING, ha,
1643                             "Invalid PCI I/O region size (%s)...\n",
1644                                 pci_name(ha->pdev));
1645                         pio = 0;
1646                 }
1647         } else {
1648                 qla_printk(KERN_WARNING, ha,
1649                     "region #0 not a PIO resource (%s)...\n",
1650                     pci_name(ha->pdev));
1651                 pio = 0;
1652         }
1653         ha->pio_address = pio;
1654
1655 skip_pio:
1656         /* Use MMIO operations for all accesses. */
1657         if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
1658                 qla_printk(KERN_ERR, ha,
1659                     "region #1 not an MMIO resource (%s), aborting\n",
1660                     pci_name(ha->pdev));
1661                 goto iospace_error_exit;
1662         }
1663         if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
1664                 qla_printk(KERN_ERR, ha,
1665                     "Invalid PCI mem region size (%s), aborting\n",
1666                         pci_name(ha->pdev));
1667                 goto iospace_error_exit;
1668         }
1669
1670         ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
1671         if (!ha->iobase) {
1672                 qla_printk(KERN_ERR, ha,
1673                     "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
1674
1675                 goto iospace_error_exit;
1676         }
1677
1678         /* Determine queue resources */
1679         ha->max_req_queues = ha->max_rsp_queues = 1;
1680         if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
1681                 (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
1682                 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1683                 goto mqiobase_exit;
1684
1685         ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1686                         pci_resource_len(ha->pdev, 3));
1687         if (ha->mqiobase) {
1688                 /* Read MSIX vector size of the board */
1689                 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1690                 ha->msix_count = msix;
1691                 /* Max queues are bounded by available msix vectors */
1692                 /* queue 0 uses two msix vectors */
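                /*
                 * i.e. with ql2xmultique_tag the response queues are capped
                 * at min(num_online_cpus() + 1, msix_count - 1); with
                 * ql2xmaxqueues > 1 the request queues are capped at
                 * QLA_MQ_SIZE.
                 */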
1693                 if (ql2xmultique_tag) {
1694                         cpus = num_online_cpus();
1695                         ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
1696                                 (cpus + 1) : (ha->msix_count - 1);
1697                         ha->max_req_queues = 2;
1698                 } else if (ql2xmaxqueues > 1) {
1699                         ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1700                                                 QLA_MQ_SIZE : ql2xmaxqueues;
1701                         DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1702                         " of request queues:%d\n", ha->max_req_queues));
1703                 }
1704                 qla_printk(KERN_INFO, ha,
1705                         "MSI-X vector count: %d\n", msix);
1706         } else
1707                 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1708
1709 mqiobase_exit:
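        /*
         * One MSI-X vector per response queue plus one extra, since queue 0
         * consumes two vectors (see the comment above).
         */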
1710         ha->msix_count = ha->max_rsp_queues + 1;
1711         return (0);
1712
1713 iospace_error_exit:
1714         return (-ENOMEM);
1715 }
1716
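/*
 * These appear to back the host template's scan_start/scan_finished hooks for
 * the midlayer's asynchronous scan: scan_start schedules the DPC work needed
 * to (re)discover the fabric, and scan_finished reports completion once the
 * loop is READY or the loop_reset_delay has expired.
 */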
1717 static void
1718 qla2xxx_scan_start(struct Scsi_Host *shost)
1719 {
1720         scsi_qla_host_t *vha = shost_priv(shost);
1721
1722         if (vha->hw->flags.running_gold_fw)
1723                 return;
1724
1725         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1726         set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1727         set_bit(RSCN_UPDATE, &vha->dpc_flags);
1728         set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1729 }
1730
1731 static int
1732 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1733 {
1734         scsi_qla_host_t *vha = shost_priv(shost);
1735
1736         if (!vha->host)
1737                 return 1;
1738         if (time > vha->hw->loop_reset_delay * HZ)
1739                 return 1;
1740
1741         return atomic_read(&vha->loop_state) == LOOP_READY;
1742 }
1743
1744 /*
1745  * PCI driver interface
1746  */
1747 static int __devinit
1748 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1749 {
1750         int     ret = -ENODEV;
1751         struct Scsi_Host *host;
1752         scsi_qla_host_t *base_vha = NULL;
1753         struct qla_hw_data *ha;
1754         char pci_info[30];
1755         char fw_str[30];
1756         struct scsi_host_template *sht;
1757         int bars, max_id, mem_only = 0;
1758         uint16_t req_length = 0, rsp_length = 0;
1759         struct req_que *req = NULL;
1760         struct rsp_que *rsp = NULL;
1761
1762         bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1763         sht = &qla2xxx_driver_template;
1764         if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
1765             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
1766             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
1767             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
1768             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1769             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1770             pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001) {
1771                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1772                 mem_only = 1;
1773         }
1774
1775         if (mem_only) {
1776                 if (pci_enable_device_mem(pdev))
1777                         goto probe_out;
1778         } else {
1779                 if (pci_enable_device(pdev))
1780                         goto probe_out;
1781         }
1782
1783         /* This may fail but that's ok */
1784         pci_enable_pcie_error_reporting(pdev);
1785
1786         ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1787         if (!ha) {
1788                 DEBUG(printk("Unable to allocate memory for ha\n"));
1789                 goto probe_out;
1790         }
1791         ha->pdev = pdev;
1792
1793         /* Clear our data area */
1794         ha->bars = bars;
1795         ha->mem_only = mem_only;
1796         spin_lock_init(&ha->hardware_lock);
1797
1798         /* Set ISP-type information. */
1799         qla2x00_set_isp_flags(ha);
1800
1801         /* Set EEH reset type to fundamental if required by hba */
1802         if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
1803                 pdev->needs_freset = 1;
1804         }
1805
1806         /* Configure PCI I/O space */
1807         ret = qla2x00_iospace_config(ha);
1808         if (ret)
1809                 goto probe_hw_failed;
1810
1811         qla_printk(KERN_INFO, ha,
1812             "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1813             ha->iobase);
1814
1815         ha->prev_topology = 0;
1816         ha->init_cb_size = sizeof(init_cb_t);
1817         ha->link_data_rate = PORT_SPEED_UNKNOWN;
1818         ha->optrom_size = OPTROM_SIZE_2300;
1819
1820         /* Assign ISP specific operations. */
1821         max_id = MAX_TARGETS_2200;
1822         if (IS_QLA2100(ha)) {
1823                 max_id = MAX_TARGETS_2100;
1824                 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1825                 req_length = REQUEST_ENTRY_CNT_2100;
1826                 rsp_length = RESPONSE_ENTRY_CNT_2100;
1827                 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1828                 ha->gid_list_info_size = 4;
1829                 ha->flash_conf_off = ~0;
1830                 ha->flash_data_off = ~0;
1831                 ha->nvram_conf_off = ~0;
1832                 ha->nvram_data_off = ~0;
1833                 ha->isp_ops = &qla2100_isp_ops;
1834         } else if (IS_QLA2200(ha)) {
1835                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1836                 req_length = REQUEST_ENTRY_CNT_2200;
1837                 rsp_length = RESPONSE_ENTRY_CNT_2100;
1838                 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1839                 ha->gid_list_info_size = 4;
1840                 ha->flash_conf_off = ~0;
1841                 ha->flash_data_off = ~0;
1842                 ha->nvram_conf_off = ~0;
1843                 ha->nvram_data_off = ~0;
1844                 ha->isp_ops = &qla2100_isp_ops;
1845         } else if (IS_QLA23XX(ha)) {
1846                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1847                 req_length = REQUEST_ENTRY_CNT_2200;
1848                 rsp_length = RESPONSE_ENTRY_CNT_2300;
1849                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1850                 ha->gid_list_info_size = 6;
1851                 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1852                         ha->optrom_size = OPTROM_SIZE_2322;
1853                 ha->flash_conf_off = ~0;
1854                 ha->flash_data_off = ~0;
1855                 ha->nvram_conf_off = ~0;
1856                 ha->nvram_data_off = ~0;
1857                 ha->isp_ops = &qla2300_isp_ops;
1858         } else if (IS_QLA24XX_TYPE(ha)) {
1859                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1860                 req_length = REQUEST_ENTRY_CNT_24XX;
1861                 rsp_length = RESPONSE_ENTRY_CNT_2300;
1862                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1863                 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1864                 ha->gid_list_info_size = 8;
1865                 ha->optrom_size = OPTROM_SIZE_24XX;
1866                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1867                 ha->isp_ops = &qla24xx_isp_ops;
1868                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1869                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1870                 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1871                 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1872         } else if (IS_QLA25XX(ha)) {
1873                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1874                 req_length = REQUEST_ENTRY_CNT_24XX;
1875                 rsp_length = RESPONSE_ENTRY_CNT_2300;
1876                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1877                 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1878                 ha->gid_list_info_size = 8;
1879                 ha->optrom_size = OPTROM_SIZE_25XX;
1880                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1881                 ha->isp_ops = &qla25xx_isp_ops;
1882                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
1883                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
1884                 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
1885                 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
1886         } else if (IS_QLA81XX(ha)) {
1887                 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1888                 req_length = REQUEST_ENTRY_CNT_24XX;
1889                 rsp_length = RESPONSE_ENTRY_CNT_2300;
1890                 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1891                 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
1892                 ha->gid_list_info_size = 8;
1893                 ha->optrom_size = OPTROM_SIZE_81XX;
1894                 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1895                 ha->isp_ops = &qla81xx_isp_ops;
1896                 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
1897                 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
1898                 ha->nvram_conf_off = ~0;
1899                 ha->nvram_data_off = ~0;
1900         }
1901
1902         mutex_init(&ha->vport_lock);
1903         init_completion(&ha->mbx_cmd_comp);
1904         complete(&ha->mbx_cmd_comp);
1905         init_completion(&ha->mbx_intr_comp);
1906
1907         set_bit(0, (unsigned long *) ha->vp_idx_map);
1908
1909         qla2x00_config_dma_addressing(ha);
1910         ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1911         if (ret) {
1912                 qla_printk(KERN_WARNING, ha,
1913                     "[ERROR] Failed to allocate memory for adapter\n");
1914
1915                 goto probe_hw_failed;
1916         }
1917
1918         req->max_q_depth = MAX_Q_DEPTH;
1919         if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1920                 req->max_q_depth = ql2xmaxqdepth;
1921
1922
1923         base_vha = qla2x00_create_host(sht, ha);
1924         if (!base_vha) {
1925                 qla_printk(KERN_WARNING, ha,
1926                     "[ERROR] Failed to allocate memory for scsi_host\n");
1927
1928                 ret = -ENOMEM;
1929                 qla2x00_mem_free(ha);
1930                 qla2x00_free_req_que(ha, req);
1931                 qla2x00_free_rsp_que(ha, rsp);
1932                 goto probe_hw_failed;
1933         }
1934
1935         pci_set_drvdata(pdev, base_vha);
1936
1937         host = base_vha->host;
1938         base_vha->req = req;
1939         host->can_queue = req->length + 128;
1940         if (IS_QLA2XXX_MIDTYPE(ha))
1941                 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1942         else
1943                 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1944                                                 base_vha->vp_idx;
1945         if (IS_QLA2100(ha))
1946                 host->sg_tablesize = 32;
1947         host->max_id = max_id;
1948         host->this_id = 255;
1949         host->cmd_per_lun = 3;
1950         host->unique_id = host->host_no;
1951         host->max_cmd_len = MAX_CMDSZ;
1952         host->max_channel = MAX_BUSES - 1;
1953         host->max_lun = MAX_LUNS;
1954         host->transportt = qla2xxx_transport_template;
1955         sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
1956
1957         /* Set up the irqs */
1958         ret = qla2x00_request_irqs(ha, rsp);
1959         if (ret)
1960                 goto probe_init_failed;
1961
1962         pci_save_state(pdev);
1963
1964         /* Alloc arrays of request and response ring ptrs */
1965 que_init:
1966         if (!qla2x00_alloc_queues(ha)) {
1967                 qla_printk(KERN_WARNING, ha,
1968                 "[ERROR] Failed to allocate memory for queue"
1969                 " pointers\n");
1970                 goto probe_init_failed;
1971         }
1972         ha->rsp_q_map[0] = rsp;
1973         ha->req_q_map[0] = req;
1974         rsp->req = req;
1975         req->rsp = rsp;
1976         set_bit(0, ha->req_qid_map);
1977         set_bit(0, ha->rsp_qid_map);
1978         /* FWI2-capable only. */
1979         req->req_q_in = &ha->iobase->isp24.req_q_in;
1980         req->req_q_out = &ha->iobase->isp24.req_q_out;
1981         rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
1982         rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
1983         if (ha->mqenable) {
1984                 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
1985                 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
1986                 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
1987                 rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
1988         }
1989
1990         if (qla2x00_initialize_adapter(base_vha)) {
1991                 qla_printk(KERN_WARNING, ha,
1992                     "Failed to initialize adapter\n");
1993
1994                 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1995                     "Adapter flags %x.\n",
1996                     base_vha->host_no, base_vha->device_flags));
1997
1998                 ret = -ENODEV;
1999                 goto probe_failed;
2000         }
2001
2002         if (ha->mqenable) {
2003                 if (qla25xx_setup_mode(base_vha)) {
2004                         qla_printk(KERN_WARNING, ha,
2005                                 "Can't create queues, falling back to single"
2006                                 " queue mode\n");
2007                         goto que_init;
2008                 }
2009         }
2010
2011         if (ha->flags.running_gold_fw)
2012                 goto skip_dpc;
2013
2014         /*
2015          * Startup the kernel thread for this host adapter
2016          */
2017         ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
2018                         "%s_dpc", base_vha->host_str);
2019         if (IS_ERR(ha->dpc_thread)) {
2020                 qla_printk(KERN_WARNING, ha,
2021                     "Unable to start DPC thread!\n");
2022                 ret = PTR_ERR(ha->dpc_thread);
2023                 goto probe_failed;
2024         }
2025
2026 skip_dpc:
2027         list_add_tail(&base_vha->list, &ha->vp_list);
2028         base_vha->host->irq = ha->pdev->irq;
2029
2030         /* Initialize the timer */
2031         qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
2032
2033         DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
2034             base_vha->host_no, ha));
2035
2036         ret = scsi_add_host(host, &pdev->dev);
2037         if (ret)
2038                 goto probe_failed;
2039
2040         base_vha->flags.init_done = 1;
2041         base_vha->flags.online = 1;
2042
2043         ha->isp_ops->enable_intrs(ha);
2044
2045         scsi_scan_host(host);
2046
2047         qla2x00_alloc_sysfs_attr(base_vha);
2048
2049         qla2x00_init_host_attr(base_vha);
2050
2051         qla2x00_dfs_setup(base_vha);
2052
2053         qla_printk(KERN_INFO, ha, "\n"
2054             " QLogic Fibre Channel HBA Driver: %s\n"
2055             "  QLogic %s - %s\n"
2056             "  ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
2057             qla2x00_version_str, ha->model_number,
2058             ha->model_desc ? ha->model_desc : "", pdev->device,
2059             ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
2060             ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
2061             ha->isp_ops->fw_version_str(base_vha, fw_str));
2062
2063         return 0;
2064
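        /*
         * Error unwind: the labels below tear down progressively less state,
         * matching how far the probe got before failing.
         */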
2065 probe_init_failed:
2066         qla2x00_free_req_que(ha, req);
2067         qla2x00_free_rsp_que(ha, rsp);
2068         ha->max_req_queues = ha->max_rsp_queues = 0;
2069
2070 probe_failed:
2071         if (base_vha->timer_active)
2072                 qla2x00_stop_timer(base_vha);
2073         base_vha->flags.online = 0;
2074         if (ha->dpc_thread) {
2075                 struct task_struct *t = ha->dpc_thread;
2076
2077                 ha->dpc_thread = NULL;
2078                 kthread_stop(t);
2079         }
2080
2081         qla2x00_free_device(base_vha);
2082
2083         scsi_host_put(base_vha->host);
2084
2085 probe_hw_failed:
2086         if (ha->iobase)
2087                 iounmap(ha->iobase);
2088
2089         pci_release_selected_regions(ha->pdev, ha->bars);
2090         kfree(ha);
2091         ha = NULL;
2092
2093 probe_out:
2094         pci_disable_device(pdev);
2095         return ret;
2096 }
2097
2098 static void
2099 qla2x00_remove_one(struct pci_dev *pdev)
2100 {
2101         scsi_qla_host_t *base_vha, *vha, *temp;
2102         struct qla_hw_data  *ha;
2103
2104         base_vha = pci_get_drvdata(pdev);
2105         ha = base_vha->hw;
2106
2107         list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
2108                 if (vha && vha->fc_vport)
2109                         fc_vport_terminate(vha->fc_vport);
2110         }
2111
2112         set_bit(UNLOADING, &base_vha->dpc_flags);
2113
2114         qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2115
2116         qla2x00_dfs_remove(base_vha);
2117
2118         qla84xx_put_chip(base_vha);
2119
2120         /* Disable timer */
2121         if (base_vha->timer_active)
2122                 qla2x00_stop_timer(base_vha);
2123
2124         base_vha->flags.online = 0;
2125
2126         /* Flush the work queue and remove it */
2127         if (ha->wq) {
2128                 flush_workqueue(ha->wq);
2129                 destroy_workqueue(ha->wq);
2130                 ha->wq = NULL;
2131         }
2132
2133         /* Kill the kernel thread for this host */
2134         if (ha->dpc_thread) {
2135                 struct task_struct *t = ha->dpc_thread;
2136
2137                 /*
2138                  * qla2xxx_wake_dpc checks for ->dpc_thread
2139                  * so we need to zero it out.
2140                  */
2141                 ha->dpc_thread = NULL;
2142                 kthread_stop(t);
2143         }
2144
2145         qla2x00_free_sysfs_attr(base_vha);
2146
2147         fc_remove_host(base_vha->host);
2148
2149         scsi_remove_host(base_vha->host);
2150
2151         qla2x00_free_device(base_vha);
2152
2153         scsi_host_put(base_vha->host);
2154
2155         if (ha->iobase)
2156                 iounmap(ha->iobase);
2157
2158         if (ha->mqiobase)
2159                 iounmap(ha->mqiobase);
2160
2161         pci_release_selected_regions(ha->pdev, ha->bars);
2162         kfree(ha);
2163         ha = NULL;
2164
2165         pci_disable_pcie_error_reporting(pdev);
2166
2167         pci_disable_device(pdev);
2168         pci_set_drvdata(pdev, NULL);
2169 }
2170
2171 static void
2172 qla2x00_free_device(scsi_qla_host_t *vha)
2173 {
2174         struct qla_hw_data *ha = vha->hw;
2175
2176         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
2177
2178         /* Disable timer */
2179         if (vha->timer_active)
2180                 qla2x00_stop_timer(vha);
2181
2182         /* Kill the kernel thread for this host */
2183         if (ha->dpc_thread) {
2184                 struct task_struct *t = ha->dpc_thread;
2185
2186                 /*
2187                  * qla2xxx_wake_dpc checks for ->dpc_thread
2188                  * so we need to zero it out.
2189                  */
2190                 ha->dpc_thread = NULL;
2191                 kthread_stop(t);
2192         }
2193
2194         qla25xx_delete_queues(vha);
2195
2196         if (ha->flags.fce_enabled)
2197                 qla2x00_disable_fce_trace(vha, NULL, NULL);
2198
2199         if (ha->eft)
2200                 qla2x00_disable_eft_trace(vha);
2201
2202         /* Stop currently executing firmware. */
2203         qla2x00_try_to_stop_firmware(vha);
2204
2205         vha->flags.online = 0;
2206
2207         /* turn off interrupts on the card */
2208         if (ha->interrupts_on)
2209                 ha->isp_ops->disable_intrs(ha);
2210
2211         qla2x00_free_irqs(vha);
2212
2213         qla2x00_mem_free(ha);
2214
2215         qla2x00_free_queues(ha);
2216 }
2217
2218 static inline void
2219 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2220     int defer)
2221 {
2222         struct fc_rport *rport;
2223         scsi_qla_host_t *base_vha;
2224
2225         if (!fcport->rport)
2226                 return;
2227
2228         rport = fcport->rport;
2229         if (defer) {
2230                 base_vha = pci_get_drvdata(vha->hw->pdev);
2231                 spin_lock_irq(vha->host->host_lock);
2232                 fcport->drport = rport;
2233                 spin_unlock_irq(vha->host->host_lock);
2234                 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2235                 qla2xxx_wake_dpc(base_vha);
2236         } else
2237                 fc_remote_port_delete(rport);
2238 }
2239
2240 /*
2241  * qla2x00_mark_device_lost - Updates fcport state when a device goes offline.
2242  *
2243  * Input: vha = adapter block pointer.  fcport = port structure pointer.
2244  *        do_login = schedule a login retry if non-zero.
2245  *        defer = defer rport removal to the DPC thread if non-zero.
2246  * Return: None.
2247  * Context:
2248  */
2249 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2250     int do_login, int defer)
2251 {
2252         if (atomic_read(&fcport->state) == FCS_ONLINE &&
2253             vha->vp_idx == fcport->vp_idx) {
2254                 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2255                 qla2x00_schedule_rport_del(vha, fcport, defer);
2256         }
2257         /*
2258          * We may need to retry the login, so mark the port LOST rather
2259          * than DEAD and let the retries below proceed.
2260          */
2261         if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
2262                 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2263
2264         if (!do_login)
2265                 return;
2266
2267         if (fcport->login_retry == 0) {
2268                 fcport->login_retry = vha->hw->login_retry_count;
2269                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2270
2271                 DEBUG(printk("scsi(%ld): Port login retry: "
2272                     "%02x%02x%02x%02x%02x%02x%02x%02x, "
2273                     "id = 0x%04x retry cnt=%d\n",
2274                     vha->host_no,
2275                     fcport->port_name[0],
2276                     fcport->port_name[1],
2277                     fcport->port_name[2],
2278                     fcport->port_name[3],
2279                     fcport->port_name[4],
2280                     fcport->port_name[5],
2281                     fcport->port_name[6],
2282                     fcport->port_name[7],
2283                     fcport->loop_id,
2284                     fcport->login_retry));
2285         }
2286 }
2287
2288 /*
2289  * qla2x00_mark_all_devices_lost
2290  *      Updates fcport state when device goes offline.
2291  *
2292  * Input:
2293  *      vha = adapter block pointer.
2294  *      defer = defer rport removal to the DPC thread if non-zero.
2295  *
2296  * Return:
2297  *      None.
2298  *
2299  * Context:
2300  */
2301 void
2302 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2303 {
2304         fc_port_t *fcport;
2305
2306         list_for_each_entry(fcport, &vha->vp_fcports, list) {
2307                 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
2308                         continue;
2309
2310                 /*
2311                  * No point in marking the device as lost, if the device is
2312                  * already DEAD.
2313                  */
2314                 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
2315                         continue;
2316                 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2317                         if (defer)
2318                                 qla2x00_schedule_rport_del(vha, fcport, defer);
2319                         else if (vha->vp_idx == fcport->vp_idx)
2320                                 qla2x00_schedule_rport_del(vha, fcport, defer);
2321                 }
2322                 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2323         }
2324 }
2325
2326 /*
2327 * qla2x00_mem_alloc
2328 *      Allocates adapter memory.
2329 *
2330 * Returns:
2331 *      0  = success.
2332 *      !0  = failure.
2333 */
2334 static int
2335 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2336         struct req_que **req, struct rsp_que **rsp)
2337 {
2338         char    name[16];
2339
2340         ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2341                 &ha->init_cb_dma, GFP_KERNEL);
2342         if (!ha->init_cb)
2343                 goto fail;
2344
2345         ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2346                 &ha->gid_list_dma, GFP_KERNEL);
2347         if (!ha->gid_list)
2348                 goto fail_free_init_cb;
2349
2350         ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2351         if (!ha->srb_mempool)
2352                 goto fail_free_gid_list;
2353
2354         /* Get memory for cached NVRAM */
2355         ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2356         if (!ha->nvram)
2357                 goto fail_free_srb_mempool;
2358
2359         snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2360                 ha->pdev->device);
2361         ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2362                 DMA_POOL_SIZE, 8, 0);
2363         if (!ha->s_dma_pool)
2364                 goto fail_free_nvram;
2365
2366         /* Allocate memory for SNS commands */
2367         if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2368         /* Get consistent memory allocated for SNS commands */
2369                 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2370                 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2371                 if (!ha->sns_cmd)
2372                         goto fail_dma_pool;
2373         } else {
2374         /* Get consistent memory allocated for MS IOCB */
2375                 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2376                         &ha->ms_iocb_dma);
2377                 if (!ha->ms_iocb)
2378                         goto fail_dma_pool;
2379         /* Get consistent memory allocated for CT SNS commands */
2380                 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2381                         sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2382                 if (!ha->ct_sns)
2383                         goto fail_free_ms_iocb;
2384         }
2385
2386         /* Allocate memory for request ring */
2387         *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2388         if (!*req) {
2389                 DEBUG(printk("Unable to allocate memory for req\n"));
2390                 goto fail_req;
2391         }
2392         (*req)->length = req_len;
2393         (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2394                 ((*req)->length + 1) * sizeof(request_t),
2395                 &(*req)->dma, GFP_KERNEL);
2396         if (!(*req)->ring) {
2397                 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2398                 goto fail_req_ring;
2399         }
2400         /* Allocate memory for response ring */
2401         *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2402         if (!*rsp) {
2403                 qla_printk(KERN_WARNING, ha,
2404                         "Unable to allocate memory for rsp\n");
2405                 goto fail_rsp;
2406         }
2407         (*rsp)->hw = ha;
2408         (*rsp)->length = rsp_len;
2409         (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2410                 ((*rsp)->length + 1) * sizeof(response_t),
2411                 &(*rsp)->dma, GFP_KERNEL);
2412         if (!(*rsp)->ring) {
2413                 qla_printk(KERN_WARNING, ha,
2414                         "Unable to allocate memory for rsp_ring\n");
2415                 goto fail_rsp_ring;
2416         }
2417         (*req)->rsp = *rsp;
2418         (*rsp)->req = *req;
2419         /* Allocate memory for NVRAM data for vports */
2420         if (ha->nvram_npiv_size) {
2421                 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2422                                         ha->nvram_npiv_size, GFP_KERNEL);
2423                 if (!ha->npiv_info) {
2424                         qla_printk(KERN_WARNING, ha,
2425                                 "Unable to allocate memory for npiv info\n");
2426                         goto fail_npiv_info;
2427                 }
2428         } else
2429                 ha->npiv_info = NULL;
2430
2431         /* Get consistent memory allocated for EX-INIT-CB. */
2432         if (IS_QLA81XX(ha)) {
2433                 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2434                     &ha->ex_init_cb_dma);
2435                 if (!ha->ex_init_cb)
2436                         goto fail_ex_init_cb;
2437         }
2438
2439         INIT_LIST_HEAD(&ha->vp_list);
2440         return 0;
2441
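/*
 * Failure unwind: the labels below roughly mirror the allocation order above
 * in reverse, starting from the point of failure.
 */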
2442 fail_ex_init_cb:
2443         kfree(ha->npiv_info);
2444 fail_npiv_info:
2445         dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2446                 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2447         (*rsp)->ring = NULL;
2448         (*rsp)->dma = 0;
2449 fail_rsp_ring:
2450         kfree(*rsp);
2451 fail_rsp:
2452         dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2453                 sizeof(request_t), (*req)->ring, (*req)->dma);
2454         (*req)->ring = NULL;
2455         (*req)->dma = 0;
2456 fail_req_ring:
2457         kfree(*req);
2458 fail_req:
2459         dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2460                 ha->ct_sns, ha->ct_sns_dma);
2461         ha->ct_sns = NULL;
2462         ha->ct_sns_dma = 0;
2463 fail_free_ms_iocb:
2464         dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2465         ha->ms_iocb = NULL;
2466         ha->ms_iocb_dma = 0;
2467 fail_dma_pool:
2468         dma_pool_destroy(ha->s_dma_pool);
2469         ha->s_dma_pool = NULL;
2470 fail_free_nvram:
2471         kfree(ha->nvram);
2472         ha->nvram = NULL;
2473 fail_free_srb_mempool:
2474         mempool_destroy(ha->srb_mempool);
2475         ha->srb_mempool = NULL;
2476 fail_free_gid_list:
2477         dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2478         ha->gid_list_dma);
2479         ha->gid_list = NULL;
2480         ha->gid_list_dma = 0;
2481 fail_free_init_cb:
2482         dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2483         ha->init_cb_dma);
2484         ha->init_cb = NULL;
2485         ha->init_cb_dma = 0;
2486 fail:
2487         DEBUG(printk("%s: Memory allocation failure\n", __func__));
2488         return -ENOMEM;
2489 }
2490
2491 /*
2492 * qla2x00_mem_free
2493 *      Frees all adapter allocated memory.
2494 *
2495 * Input:
2496 *      ha = adapter block pointer.
2497 */
2498 static void
2499 qla2x00_mem_free(struct qla_hw_data *ha)
2500 {
2501         if (ha->srb_mempool)
2502                 mempool_destroy(ha->srb_mempool);
2503
2504         if (ha->fce)
2505                 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2506                 ha->fce_dma);
2507
2508         if (ha->fw_dump) {
2509                 if (ha->eft)
2510                         dma_free_coherent(&ha->pdev->dev,
2511                         ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2512                 vfree(ha->fw_dump);
2513         }
2514
2515         if (ha->dcbx_tlv)
2516                 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
2517                     ha->dcbx_tlv, ha->dcbx_tlv_dma);
2518
2519         if (ha->xgmac_data)
2520                 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
2521                     ha->xgmac_data, ha->xgmac_data_dma);
2522
2523         if (ha->sns_cmd)
2524                 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2525                 ha->sns_cmd, ha->sns_cmd_dma);
2526
2527         if (ha->ct_sns)
2528                 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2529                 ha->ct_sns, ha->ct_sns_dma);
2530
2531         if (ha->sfp_data)
2532                 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
2533
2534         if (ha->edc_data)
2535                 dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
2536
2537         if (ha->ms_iocb)
2538                 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2539
2540         if (ha->ex_init_cb)
2541                 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
2542
2543         if (ha->s_dma_pool)
2544                 dma_pool_destroy(ha->s_dma_pool);
2545
2546         if (ha->gid_list)
2547                 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2548                 ha->gid_list_dma);
2549
2550         if (ha->init_cb)
2551                 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2552                 ha->init_cb, ha->init_cb_dma);
2553         vfree(ha->optrom_buffer);
2554         kfree(ha->nvram);
2555         kfree(ha->npiv_info);
2556
2557         ha->srb_mempool = NULL;
2558         ha->eft = NULL;
2559         ha->eft_dma = 0;
2560         ha->sns_cmd = NULL;
2561         ha->sns_cmd_dma = 0;
2562         ha->ct_sns = NULL;
2563         ha->ct_sns_dma = 0;
2564         ha->ms_iocb = NULL;
2565         ha->ms_iocb_dma = 0;
2566         ha->init_cb = NULL;
2567         ha->init_cb_dma = 0;
2568         ha->ex_init_cb = NULL;
2569         ha->ex_init_cb_dma = 0;
2570
2571         ha->s_dma_pool = NULL;
2572
2573         ha->gid_list = NULL;
2574         ha->gid_list_dma = 0;
2575
2576         ha->fw_dump = NULL;
2577         ha->fw_dumped = 0;
2578         ha->fw_dump_reading = 0;
2579 }
2580
2581 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2582                                                 struct qla_hw_data *ha)
2583 {
2584         struct Scsi_Host *host;
2585         struct scsi_qla_host *vha = NULL;
2586
2587         host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2588         if (host == NULL) {
2589                 printk(KERN_WARNING
2590                 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2591                 goto fail;
2592         }
2593
2594         /* Clear our data area */
2595         vha = shost_priv(host);
2596         memset(vha, 0, sizeof(scsi_qla_host_t));
2597
2598         vha->host = host;
2599         vha->host_no = host->host_no;
2600         vha->hw = ha;
2601
2602         INIT_LIST_HEAD(&vha->vp_fcports);
2603         INIT_LIST_HEAD(&vha->work_list);
2604         INIT_LIST_HEAD(&vha->list);
2605
2606         spin_lock_init(&vha->work_lock);
2607
2608         sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2609         return vha;
2610
2611 fail:
2612         return vha;
2613 }
2614
2615 static struct qla_work_evt *
2616 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2617 {
2618         struct qla_work_evt *e;
2619
2620         e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2621         if (!e)
2622                 return NULL;
2623
2624         INIT_LIST_HEAD(&e->list);
2625         e->type = type;
2626         e->flags = QLA_EVT_FLAG_FREE;
2627         return e;
2628 }
2629
2630 static int
2631 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
2632 {
2633         unsigned long flags;
2634
2635         spin_lock_irqsave(&vha->work_lock, flags);
2636         list_add_tail(&e->list, &vha->work_list);
2637         spin_unlock_irqrestore(&vha->work_lock, flags);
2638         qla2xxx_wake_dpc(vha);
2639
2640         return QLA_SUCCESS;
2641 }
2642
2643 int
2644 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2645     u32 data)
2646 {
2647         struct qla_work_evt *e;
2648
2649         e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
2650         if (!e)
2651                 return QLA_FUNCTION_FAILED;
2652
2653         e->u.aen.code = code;
2654         e->u.aen.data = data;
2655         return qla2x00_post_work(vha, e);
2656 }
2657
2658 int
2659 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
2660 {
2661         struct qla_work_evt *e;
2662
2663         e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
2664         if (!e)
2665                 return QLA_FUNCTION_FAILED;
2666
2667         memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
2668         return qla2x00_post_work(vha, e);
2669 }
2670
2671 #define qla2x00_post_async_work(name, type)     \
2672 int qla2x00_post_async_##name##_work(           \
2673     struct scsi_qla_host *vha,                  \
2674     fc_port_t *fcport, uint16_t *data)          \
2675 {                                               \
2676         struct qla_work_evt *e;                 \
2677                                                 \
2678         e = qla2x00_alloc_work(vha, type);      \
2679         if (!e)                                 \
2680                 return QLA_FUNCTION_FAILED;     \
2681                                                 \
2682         e->u.logio.fcport = fcport;             \
2683         if (data) {                             \
2684                 e->u.logio.data[0] = data[0];   \
2685                 e->u.logio.data[1] = data[1];   \
2686         }                                       \
2687         return qla2x00_post_work(vha, e);       \
2688 }
2689
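/*
 * Instantiate the template above for the four async login/logout event types;
 * this generates qla2x00_post_async_{login,login_done,logout,logout_done}_work().
 */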
2690 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
2691 qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
2692 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
2693 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
2694
2695 int
2696 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
2697 {
2698         struct qla_work_evt *e;
2699
2700         e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
2701         if (!e)
2702                 return QLA_FUNCTION_FAILED;
2703
2704         e->u.uevent.code = code;
2705         return qla2x00_post_work(vha, e);
2706 }
2707
2708 static void
2709 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
2710 {
2711         char event_string[40];
2712         char *envp[] = { event_string, NULL };
2713
2714         switch (code) {
2715         case QLA_UEVENT_CODE_FW_DUMP:
2716                 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
2717                     vha->host_no);
2718                 break;
2719         default:
2720                 /* do nothing */
2721                 break;
2722         }
2723         kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
2724 }
2725
2726 void
2727 qla2x00_do_work(struct scsi_qla_host *vha)
2728 {
2729         struct qla_work_evt *e, *tmp;
2730         unsigned long flags;
2731         LIST_HEAD(work);
2732
2733         spin_lock_irqsave(&vha->work_lock, flags);
2734         list_splice_init(&vha->work_list, &work);
2735         spin_unlock_irqrestore(&vha->work_lock, flags);
2736
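        /*
         * The pending events were spliced onto the local list under work_lock
         * above, so they can be handled and freed here without holding the
         * lock.
         */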
2737         list_for_each_entry_safe(e, tmp, &work, list) {
2738                 list_del_init(&e->list);
2739
2740                 switch (e->type) {
2741                 case QLA_EVT_AEN:
2742                         fc_host_post_event(vha->host, fc_get_event_number(),
2743                             e->u.aen.code, e->u.aen.data);
2744                         break;
2745                 case QLA_EVT_IDC_ACK:
2746                         qla81xx_idc_ack(vha, e->u.idc_ack.mb);
2747                         break;
2748                 case QLA_EVT_ASYNC_LOGIN:
2749                         qla2x00_async_login(vha, e->u.logio.fcport,
2750                             e->u.logio.data);
2751                         break;
2752                 case QLA_EVT_ASYNC_LOGIN_DONE:
2753                         qla2x00_async_login_done(vha, e->u.logio.fcport,
2754                             e->u.logio.data);
2755                         break;
2756                 case QLA_EVT_ASYNC_LOGOUT:
2757                         qla2x00_async_logout(vha, e->u.logio.fcport);
2758                         break;
2759                 case QLA_EVT_ASYNC_LOGOUT_DONE:
2760                         qla2x00_async_logout_done(vha, e->u.logio.fcport,
2761                             e->u.logio.data);
2762                         break;
2763                 case QLA_EVT_UEVENT:
2764                         qla2x00_uevent_emit(vha, e->u.uevent.code);
2765                         break;
2766                 }
2767                 if (e->flags & QLA_EVT_FLAG_FREE)
2768                         kfree(e);
2769         }
2770 }
2771
2772 /* Retries login for all the fcports of a vport
2773  * Context: dpc thread
2774  */
2775 void qla2x00_relogin(struct scsi_qla_host *vha)
2776 {
2777         fc_port_t       *fcport;
2778         int status;
2779         uint16_t        next_loopid = 0;
2780         struct qla_hw_data *ha = vha->hw;
2781         uint16_t data[2];
2782
2783         list_for_each_entry(fcport, &vha->vp_fcports, list) {
2784         /*
2785          * If the port is not ONLINE then try to login
2786          * to it if we haven't run out of retries.
2787          */
2788                 if (atomic_read(&fcport->state) !=
2789                         FCS_ONLINE && fcport->login_retry) {
2790
2791                         fcport->login_retry--;
2792                         if (fcport->flags & FCF_FABRIC_DEVICE) {
2793                                 if (fcport->flags & FCF_FCP2_DEVICE)
2794                                         ha->isp_ops->fabric_logout(vha,
2795                                                         fcport->loop_id,
2796                                                         fcport->d_id.b.domain,
2797                                                         fcport->d_id.b.area,
2798                                                         fcport->d_id.b.al_pa);
2799
2800                                 if (IS_ALOGIO_CAPABLE(ha)) {
2801                                         data[0] = 0;
2802                                         data[1] = QLA_LOGIO_LOGIN_RETRIED;
2803                                         status = qla2x00_post_async_login_work(
2804                                             vha, fcport, data);
2805                                         if (status == QLA_SUCCESS)
2806                                                 continue;
2807                                         /* Attempt a retry. */
2808                                         status = 1;
2809                                 } else
2810                                         status = qla2x00_fabric_login(vha,
2811                                             fcport, &next_loopid);
2812                         } else
2813                                 status = qla2x00_local_device_login(vha,
2814                                                                 fcport);
2815
2816                         if (status == QLA_SUCCESS) {
2817                                 fcport->old_loop_id = fcport->loop_id;
2818
2819                                 DEBUG(printk("scsi(%ld): port login OK: logged "
2820                                 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
2821
2822                                 qla2x00_update_fcport(vha, fcport);
2823
2824                         } else if (status == 1) {
2825                                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2826                                 /* retry the login again */
2827                                 DEBUG(printk("scsi(%ld): Retrying"
2828                                 " %d login again loop_id 0x%x\n",
2829                                 vha->host_no, fcport->login_retry,
2830                                                 fcport->loop_id));
2831                         } else {
2832                                 fcport->login_retry = 0;
2833                         }
2834
2835                         if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2836                                 fcport->loop_id = FC_NO_LOOP_ID;
2837                 }
2838                 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2839                         break;
2840         }
2841 }
2842
2843 /**************************************************************************
2844 * qla2x00_do_dpc
2845 *   This kernel thread is a task that is scheduled by the interrupt handler
2846 *   to perform the background processing for interrupts.
2847 *
2848 * Notes:
2849 * This task always runs in the context of a kernel thread.  It
2850 * is kicked off by the driver's detect code and starts up
2851 * one instance per adapter. It immediately goes to sleep and waits for
2852 * some fibre event.  When either the interrupt handler or
2853 * the timer routine detects an event, it will set one of the task
2854 * bits and then wake us up.
2855 **************************************************************************/
2856 static int
2857 qla2x00_do_dpc(void *data)
2858 {
2859         int             rval;
2860         scsi_qla_host_t *base_vha;
2861         struct qla_hw_data *ha;
2862
2863         ha = (struct qla_hw_data *)data;
2864         base_vha = pci_get_drvdata(ha->pdev);
2865
2866         set_user_nice(current, -20);
2867
2868         while (!kthread_should_stop()) {
2869                 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
2870
2871                 set_current_state(TASK_INTERRUPTIBLE);
2872                 schedule();
2873                 __set_current_state(TASK_RUNNING);
2874
2875                 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2876
2877                 /* Initialization not yet finished. Don't do anything yet. */
2878                 if (!base_vha->flags.init_done)
2879                         continue;
2880
2881                 if (ha->flags.eeh_busy) {
2882                         DEBUG17(qla_printk(KERN_WARNING, ha,
2883                             "qla2x00_do_dpc: dpc_flags: %lx\n",
2884                             base_vha->dpc_flags));
2885                         continue;
2886                 }
2887
2888                 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2889
2890                 ha->dpc_active = 1;
2891
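                /*
                 * Skip this pass while a mailbox command is outstanding; the
                 * thread simply goes back to sleep until the next wakeup.
                 */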
2892                 if (ha->flags.mbox_busy) {
2893                         ha->dpc_active = 0;
2894                         continue;
2895                 }
2896
2897                 qla2x00_do_work(base_vha);
2898
2899                 if (test_and_clear_bit(ISP_ABORT_NEEDED,
2900                                                 &base_vha->dpc_flags)) {
2901
2902                         DEBUG(printk("scsi(%ld): dpc: sched "
2903                             "qla2x00_abort_isp ha = %p\n",
2904                             base_vha->host_no, ha));
2905                         if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2906                             &base_vha->dpc_flags))) {
2907
2908                                 if (qla2x00_abort_isp(base_vha)) {
2909                                         /* failed. retry later */
2910                                         set_bit(ISP_ABORT_NEEDED,
2911                                             &base_vha->dpc_flags);
2912                                 }
2913                                 clear_bit(ABORT_ISP_ACTIVE,
2914                                                 &base_vha->dpc_flags);
2915                         }
2916
2917                         DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2918                             base_vha->host_no));
2919                 }
2920
2921                 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
2922                         qla2x00_update_fcports(base_vha);
2923                         clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2924                 }
2925
2926                 if (test_and_clear_bit(RESET_MARKER_NEEDED,
2927                                                         &base_vha->dpc_flags) &&
2928                     (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
2929
2930                         DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2931                             base_vha->host_no));
2932
2933                         qla2x00_rst_aen(base_vha);
2934                         clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
2935                 }
2936
2937                 /* Retry each device up to login retry count */
2938                 if ((test_and_clear_bit(RELOGIN_NEEDED,
2939                                                 &base_vha->dpc_flags)) &&
2940                     !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
2941                     atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
2942
2943                         DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2944                                         base_vha->host_no));
2945                         qla2x00_relogin(base_vha);
2946
2947                         DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2948                             base_vha->host_no));
2949                 }
2950
2951                 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
2952                                                         &base_vha->dpc_flags)) {
2953
2954                         DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2955                                 base_vha->host_no));
2956
2957                         if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2958                             &base_vha->dpc_flags))) {
2959
2960                                 rval = qla2x00_loop_resync(base_vha);
2961
2962                                 clear_bit(LOOP_RESYNC_ACTIVE,
2963                                                 &base_vha->dpc_flags);
2964                         }
2965
2966                         DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2967                             base_vha->host_no));
2968                 }
2969
2970                 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
2971                     atomic_read(&base_vha->loop_state) == LOOP_READY) {
2972                         clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
2973                         qla2xxx_flash_npiv_conf(base_vha);
2974                 }
2975
2976                 if (!ha->interrupts_on)
2977                         ha->isp_ops->enable_intrs(ha);
2978
2979                 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
2980                                         &base_vha->dpc_flags))
2981                         ha->isp_ops->beacon_blink(base_vha);
2982
2983                 qla2x00_do_dpc_all_vps(base_vha);
2984
2985                 ha->dpc_active = 0;
2986         } /* End of while (!kthread_should_stop()) */
2987
2988         DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
2989
2990         /*
2991          * Make sure that nobody tries to wake us up again.
2992          */
2993         ha->dpc_active = 0;
2994
2995         /* Cleanup any residual CTX SRBs. */
2996         qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
2997
2998         return 0;
2999 }
3000
3001 void
3002 qla2xxx_wake_dpc(struct scsi_qla_host *vha)
3003 {
3004         struct qla_hw_data *ha = vha->hw;
3005         struct task_struct *t = ha->dpc_thread;
3006
3007         if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
3008                 wake_up_process(t);
3009 }
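
/*
 * Illustrative only -- how the hand-off described in the qla2x00_do_dpc()
 * header works in practice.  A producer running in interrupt or timer
 * context (qla2x00_timer() later in this file does exactly this for
 * ISP_ABORT_NEEDED) records the work in dpc_flags and then wakes the kthread:
 *
 *	set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 *	qla2xxx_wake_dpc(base_vha);
 *
 * The consumer side is the matching test_and_clear_bit() branch in
 * qla2x00_do_dpc(), which runs in process context and may sleep.
 */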
3010
3011 /*
3012 *  qla2x00_rst_aen
3013 *      Processes asynchronous reset.
3014 *
3015 * Input:
3016 *      vha = adapter block pointer.
3017 */
3018 static void
3019 qla2x00_rst_aen(scsi_qla_host_t *vha)
3020 {
3021         if (vha->flags.online && !vha->flags.reset_active &&
3022             !atomic_read(&vha->loop_down_timer) &&
3023             !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
3024                 do {
3025                         clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3026
3027                         /*
3028                          * Issue marker command only when we are going to start
3029                          * the I/O.
3030                          */
3031                         vha->marker_needed = 1;
3032                 } while (!atomic_read(&vha->loop_down_timer) &&
3033                     (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
3034         }
3035 }
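
/*
 * Note on the loop above (a reading of the code, not extra behavior): the
 * do/while re-tests RESET_MARKER_NEEDED because the interrupt path may set
 * it again while we are clearing it; each pass only records the request by
 * setting vha->marker_needed, and the marker command itself is issued later,
 * when I/O actually restarts.
 */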
3036
3037 static void
3038 qla2x00_sp_free_dma(srb_t *sp)
3039 {
3040         struct scsi_cmnd *cmd = sp->cmd;
3041
3042         if (sp->flags & SRB_DMA_VALID) {
3043                 scsi_dma_unmap(cmd);
3044                 sp->flags &= ~SRB_DMA_VALID;
3045         }
3046         CMD_SP(cmd) = NULL;
3047 }
3048
3049 void
3050 qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3051 {
3052         struct scsi_cmnd *cmd = sp->cmd;
3053
3054         qla2x00_sp_free_dma(sp);
3055
3056         mempool_free(sp, ha->srb_mempool);
3057
3058         cmd->scsi_done(cmd);
3059 }
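
/*
 * End of the SRB life cycle (summary of the two helpers above): the srb_t is
 * allocated from ha->srb_mempool by the queuecommand path (not shown in this
 * excerpt), its scatter/gather list is DMA-mapped for the duration of the
 * command, and on completion qla2x00_sp_compl() unmaps the DMA, returns the
 * SRB to the mempool and hands the command back to the midlayer through
 * cmd->scsi_done().
 */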
3060
3061 /**************************************************************************
3062 *   qla2x00_timer
3063 *
3064 * Description:
3065 *   One-second timer: decrements the port-down and loop-down timers and
3066 *   wakes the DPC thread when deferred work is pending.
3067 * Context: Interrupt
3068 ***************************************************************************/
3069 void
3070 qla2x00_timer(scsi_qla_host_t *vha)
3071 {
3072         unsigned long   cpu_flags = 0;
3073         fc_port_t       *fcport;
3074         int             start_dpc = 0;
3075         int             index;
3076         srb_t           *sp;
3077         int             t;
3078         uint16_t        w;
3079         struct qla_hw_data *ha = vha->hw;
3080         struct req_que *req;
3081
3082         /* Hardware read to raise pending EEH errors during mailbox waits. */
3083         if (!pci_channel_offline(ha->pdev))
3084                 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3085         /*
3086          * Ports - Port down timer.
3087          *
3088          * Whenever a port is in the LOST state, we start decrementing its
3089          * port down timer every second until it reaches zero. Once it
3090          * reaches zero, the port is marked DEAD.
3091          */
3092         t = 0;
3093         list_for_each_entry(fcport, &vha->vp_fcports, list) {
3094                 if (fcport->port_type != FCT_TARGET)
3095                         continue;
3096
3097                 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
3098
3099                         if (atomic_read(&fcport->port_down_timer) == 0)
3100                                 continue;
3101
3102                         if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
3103                                 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
3104
3105                         DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
3106                             "%d remaining\n",
3107                             vha->host_no,
3108                             t, atomic_read(&fcport->port_down_timer)));
3109                 }
3110                 t++;
3111         } /* End of for fcport  */
3112
3113
3114         /* Loop down handler. */
3115         if (atomic_read(&vha->loop_down_timer) > 0 &&
3116             !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
3117                 && vha->flags.online) {
3118
3119                 if (atomic_read(&vha->loop_down_timer) ==
3120                     vha->loop_down_abort_time) {
3121
3122                         DEBUG(printk("scsi(%ld): Loop Down - aborting the "
3123                             "queues before time expire\n",
3124                             vha->host_no));
3125
3126                         if (!IS_QLA2100(ha) && vha->link_down_timeout)
3127                                 atomic_set(&vha->loop_state, LOOP_DEAD);
3128
3129                         /*
3130                          * Schedule an ISP abort to return any FCP2-device
3131                          * commands.
3132                          */
3133                         /* NPIV - scan physical port only */
3134                         if (!vha->vp_idx) {
3135                                 spin_lock_irqsave(&ha->hardware_lock,
3136                                     cpu_flags);
3137                                 req = ha->req_q_map[0];
3138                                 for (index = 1;
3139                                     index < MAX_OUTSTANDING_COMMANDS;
3140                                     index++) {
3141                                         fc_port_t *sfcp;
3142
3143                                         sp = req->outstanding_cmds[index];
3144                                         if (!sp)
3145                                                 continue;
3146                                         if (sp->ctx)
3147                                                 continue;
3148                                         sfcp = sp->fcport;
3149                                         if (!(sfcp->flags & FCF_FCP2_DEVICE))
3150                                                 continue;
3151
3152                                         set_bit(ISP_ABORT_NEEDED,
3153                                                         &vha->dpc_flags);
3154                                         break;
3155                                 }
3156                                 spin_unlock_irqrestore(&ha->hardware_lock,
3157                                                                 cpu_flags);
3158                         }
3159                         start_dpc++;
3160                 }
3161
3162                 /* if the loop has been down for 4 minutes, reinit adapter */
3163                 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
3164                         if (!(vha->device_flags & DFLG_NO_CABLE)) {
3165                                 DEBUG(printk("scsi(%ld): Loop down - "
3166                                     "aborting ISP.\n",
3167                                     vha->host_no));
3168                                 qla_printk(KERN_WARNING, ha,
3169                                     "Loop down - aborting ISP.\n");
3170
3171                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3172                         }
3173                 }
3174                 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
3175                     vha->host_no,
3176                     atomic_read(&vha->loop_down_timer)));
3177         }
3178
3179         /* Check if beacon LED needs to be blinked */
3180         if (ha->beacon_blink_led == 1) {
3181                 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3182                 start_dpc++;
3183         }
3184
3185         /* Process any deferred work. */
3186         if (!list_empty(&vha->work_list))
3187                 start_dpc++;
3188
3189         /* Schedule the DPC routine if needed */
3190         if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
3191             test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
3192             test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
3193             start_dpc ||
3194             test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
3195             test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
3196             test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
3197             test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
3198                 qla2xxx_wake_dpc(vha);
3199
3200         qla2x00_restart_timer(vha, WATCH_INTERVAL);
3201 }
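
/*
 * qla2x00_timer() runs in timer (softirq) context, so it only decrements
 * counters, sets dpc_flags bits and wakes the DPC kthread; anything that can
 * sleep is deferred.  The timer is armed by a start-timer helper earlier in
 * this file (not part of this excerpt); with the timer API of this kernel
 * generation that setup would look roughly like the sketch below, where the
 * placement and surrounding helper are assumptions:
 *
 *	init_timer(&vha->timer);
 *	vha->timer.expires = jiffies + WATCH_INTERVAL * HZ;
 *	vha->timer.data = (unsigned long)vha;
 *	vha->timer.function = (void (*)(unsigned long))qla2x00_timer;
 *	add_timer(&vha->timer);
 *
 * qla2x00_restart_timer(vha, WATCH_INTERVAL), called at the end of the
 * function above, then re-arms it for the next one-second tick.
 */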
3202
3203 /* Firmware interface routines. */
3204
3205 #define FW_BLOBS        7
3206 #define FW_ISP21XX      0
3207 #define FW_ISP22XX      1
3208 #define FW_ISP2300      2
3209 #define FW_ISP2322      3
3210 #define FW_ISP24XX      4
3211 #define FW_ISP25XX      5
3212 #define FW_ISP81XX      6
3213
3214 #define FW_FILE_ISP21XX "ql2100_fw.bin"
3215 #define FW_FILE_ISP22XX "ql2200_fw.bin"
3216 #define FW_FILE_ISP2300 "ql2300_fw.bin"
3217 #define FW_FILE_ISP2322 "ql2322_fw.bin"
3218 #define FW_FILE_ISP24XX "ql2400_fw.bin"
3219 #define FW_FILE_ISP25XX "ql2500_fw.bin"
3220 #define FW_FILE_ISP81XX "ql8100_fw.bin"
3221
3222 static DEFINE_MUTEX(qla_fw_lock);
3223
3224 static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
3225         { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
3226         { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
3227         { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
3228         { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
3229         { .name = FW_FILE_ISP24XX, },
3230         { .name = FW_FILE_ISP25XX, },
3231         { .name = FW_FILE_ISP81XX, },
3232 };
3233
3234 struct fw_blob *
3235 qla2x00_request_firmware(scsi_qla_host_t *vha)
3236 {
3237         struct qla_hw_data *ha = vha->hw;
3238         struct fw_blob *blob;
3239
3240         blob = NULL;
3241         if (IS_QLA2100(ha)) {
3242                 blob = &qla_fw_blobs[FW_ISP21XX];
3243         } else if (IS_QLA2200(ha)) {
3244                 blob = &qla_fw_blobs[FW_ISP22XX];
3245         } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
3246                 blob = &qla_fw_blobs[FW_ISP2300];
3247         } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
3248                 blob = &qla_fw_blobs[FW_ISP2322];
3249         } else if (IS_QLA24XX_TYPE(ha)) {
3250                 blob = &qla_fw_blobs[FW_ISP24XX];
3251         } else if (IS_QLA25XX(ha)) {
3252                 blob = &qla_fw_blobs[FW_ISP25XX];
3253         } else if (IS_QLA81XX(ha)) {
3254                 blob = &qla_fw_blobs[FW_ISP81XX];
3255         }
3256
3257         mutex_lock(&qla_fw_lock);
3258         if (blob->fw)
3259                 goto out;
3260
3261         if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
3262                 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
3263                     "(%s).\n", vha->host_no, blob->name));
3264                 blob->fw = NULL;
3265                 blob = NULL;
3266                 goto out;
3267         }
3268
3269 out:
3270         mutex_unlock(&qla_fw_lock);
3271         return blob;
3272 }
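
/*
 * Hedged usage sketch (the real callers live in the firmware-load paths, not
 * in this excerpt): a caller treats a NULL return as "no driver-bundled
 * image" and otherwise consumes the cached struct firmware through the blob:
 *
 *	struct fw_blob *blob = qla2x00_request_firmware(vha);
 *
 *	if (!blob || !blob->fw)
 *		return QLA_FUNCTION_FAILED;	/* fall back to flash/ROM load */
 *	load_risc_code(vha, blob->fw->data, blob->fw->size);	/* hypothetical helper */
 *
 * blob->fw is cached under qla_fw_lock and released exactly once, at module
 * exit, by qla2x00_release_firmware().
 */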
3273
3274 static void
3275 qla2x00_release_firmware(void)
3276 {
3277         int idx;
3278
3279         mutex_lock(&qla_fw_lock);
3280         for (idx = 0; idx < FW_BLOBS; idx++)
3281                 if (qla_fw_blobs[idx].fw)
3282                         release_firmware(qla_fw_blobs[idx].fw);
3283         mutex_unlock(&qla_fw_lock);
3284 }
3285
3286 static pci_ers_result_t
3287 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3288 {
3289         scsi_qla_host_t *vha = pci_get_drvdata(pdev);
3290         struct qla_hw_data *ha = vha->hw;
3291
3292         DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
3293             state));
3294
3295         switch (state) {
3296         case pci_channel_io_normal:
3297                 ha->flags.eeh_busy = 0;
3298                 return PCI_ERS_RESULT_CAN_RECOVER;
3299         case pci_channel_io_frozen:
3300                 ha->flags.eeh_busy = 1;
3301                 qla2x00_free_irqs(vha);
3302                 pci_disable_device(pdev);
3303                 return PCI_ERS_RESULT_NEED_RESET;
3304         case pci_channel_io_perm_failure:
3305                 ha->flags.pci_channel_io_perm_failure = 1;
3306                 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3307                 return PCI_ERS_RESULT_DISCONNECT;
3308         }
3309         return PCI_ERS_RESULT_NEED_RESET;
3310 }
3311
3312 static pci_ers_result_t
3313 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
3314 {
3315         int risc_paused = 0;
3316         uint32_t stat;
3317         unsigned long flags;
3318         scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3319         struct qla_hw_data *ha = base_vha->hw;
3320         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3321         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
3322
3323         spin_lock_irqsave(&ha->hardware_lock, flags);
3324         if (IS_QLA2100(ha) || IS_QLA2200(ha)){
3325                 stat = RD_REG_DWORD(&reg->hccr);
3326                 if (stat & HCCR_RISC_PAUSE)
3327                         risc_paused = 1;
3328         } else if (IS_QLA23XX(ha)) {
3329                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
3330                 if (stat & HSR_RISC_PAUSED)
3331                         risc_paused = 1;
3332         } else if (IS_FWI2_CAPABLE(ha)) {
3333                 stat = RD_REG_DWORD(&reg24->host_status);
3334                 if (stat & HSRX_RISC_PAUSED)
3335                         risc_paused = 1;
3336         }
3337         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3338
3339         if (risc_paused) {
3340                 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
3341                     "Dumping firmware!\n");
3342                 ha->isp_ops->fw_dump(base_vha, 0);
3343
3344                 return PCI_ERS_RESULT_NEED_RESET;
3345         } else
3346                 return PCI_ERS_RESULT_RECOVERED;
3347 }
3348
3349 static pci_ers_result_t
3350 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
3351 {
3352         pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
3353         scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3354         struct qla_hw_data *ha = base_vha->hw;
3355         struct rsp_que *rsp;
3356         int rc, retries = 10;
3357
3358         DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
3359
3360         /* Workaround: the qla2xxx driver, which accesses hardware early in
3361          * the recovery path, needs the error state to be
3362          * pci_channel_io_normal.  Otherwise mailbox commands time out.
3363          */
3364         pdev->error_state = pci_channel_io_normal;
3365
3366         pci_restore_state(pdev);
3367
3368         /* pci_restore_state() clears the device's saved_state flag, so
3369          * save the state again here to set that flag for any later restore.
3370          */
3371         pci_save_state(pdev);
3372
3373         if (ha->mem_only)
3374                 rc = pci_enable_device_mem(pdev);
3375         else
3376                 rc = pci_enable_device(pdev);
3377
3378         if (rc) {
3379                 qla_printk(KERN_WARNING, ha,
3380                     "Can't re-enable PCI device after reset.\n");
3381                 return ret;
3382         }
3383
3384         rsp = ha->rsp_q_map[0];
3385         if (qla2x00_request_irqs(ha, rsp))
3386                 return ret;
3387
3388         if (ha->isp_ops->pci_config(base_vha))
3389                 return ret;
3390
3391         while (ha->flags.mbox_busy && retries--)
3392                 msleep(1000);
3393
3394         set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3395         if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
3396                 ret =  PCI_ERS_RESULT_RECOVERED;
3397         clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
3398
3399         pci_cleanup_aer_uncorrect_error_status(pdev);
3400
3401         DEBUG17(qla_printk(KERN_WARNING, ha,
3402             "slot_reset-return:ret=%x\n", ret));
3403
3404         return ret;
3405 }
3406
3407 static void
3408 qla2xxx_pci_resume(struct pci_dev *pdev)
3409 {
3410         scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3411         struct qla_hw_data *ha = base_vha->hw;
3412         int ret;
3413
3414         DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
3415
3416         ret = qla2x00_wait_for_hba_online(base_vha);
3417         if (ret != QLA_SUCCESS) {
3418                 qla_printk(KERN_ERR, ha,
3419                     "The device failed to resume I/O "
3420                     "from slot/link_reset.\n");
3421         }
3422
3423         ha->flags.eeh_busy = 0;
3424 }
3425
3426 static struct pci_error_handlers qla2xxx_err_handler = {
3427         .error_detected = qla2xxx_pci_error_detected,
3428         .mmio_enabled = qla2xxx_pci_mmio_enabled,
3429         .slot_reset = qla2xxx_pci_slot_reset,
3430         .resume = qla2xxx_pci_resume,
3431 };
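
/*
 * For reference, the PCI core drives these callbacks in a fixed order during
 * AER/EEH recovery: .error_detected first (its return value decides how
 * recovery proceeds), .mmio_enabled once MMIO access is restored when the
 * handler asked for CAN_RECOVER, .slot_reset after the slot/link reset, and
 * .resume when traffic may restart.  The eeh_busy flag set in
 * qla2xxx_pci_error_detected() is what makes qla2x00_do_dpc() skip its work
 * until qla2xxx_pci_resume() clears it.
 */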
3432
3433 static struct pci_device_id qla2xxx_pci_tbl[] = {
3434         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
3435         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
3436         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
3437         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
3438         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
3439         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
3440         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
3441         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
3442         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
3443         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
3444         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
3445         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
3446         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
3447         { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
3448         { 0 },
3449 };
3450 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
3451
3452 static struct pci_driver qla2xxx_pci_driver = {
3453         .name           = QLA2XXX_DRIVER_NAME,
3454         .driver         = {
3455                 .owner          = THIS_MODULE,
3456         },
3457         .id_table       = qla2xxx_pci_tbl,
3458         .probe          = qla2x00_probe_one,
3459         .remove         = qla2x00_remove_one,
3460         .err_handler    = &qla2xxx_err_handler,
3461 };
3462
3463 /**
3464  * qla2x00_module_init - Module initialization.
3465  **/
3466 static int __init
3467 qla2x00_module_init(void)
3468 {
3469         int ret = 0;
3470
3471         /* Allocate cache for SRBs. */
3472         srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
3473             SLAB_HWCACHE_ALIGN, NULL);
3474         if (srb_cachep == NULL) {
3475                 printk(KERN_ERR
3476                     "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
3477                 return -ENOMEM;
3478         }
3479
3480         /* Derive version string. */
3481         strcpy(qla2x00_version_str, QLA2XXX_VERSION);
3482         if (ql2xextended_error_logging)
3483                 strcat(qla2x00_version_str, "-debug");
3484
3485         qla2xxx_transport_template =
3486             fc_attach_transport(&qla2xxx_transport_functions);
3487         if (!qla2xxx_transport_template) {
3488                 kmem_cache_destroy(srb_cachep);
3489                 return -ENODEV;
3490         }
3491         qla2xxx_transport_vport_template =
3492             fc_attach_transport(&qla2xxx_transport_vport_functions);
3493         if (!qla2xxx_transport_vport_template) {
3494                 kmem_cache_destroy(srb_cachep);
3495                 fc_release_transport(qla2xxx_transport_template);
3496                 return -ENODEV;
3497         }
3498
3499         printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
3500             qla2x00_version_str);
3501         ret = pci_register_driver(&qla2xxx_pci_driver);
3502         if (ret) {
3503                 kmem_cache_destroy(srb_cachep);
3504                 fc_release_transport(qla2xxx_transport_template);
3505                 fc_release_transport(qla2xxx_transport_vport_template);
3506         }
3507         return ret;
3508 }
3509
3510 /**
3511  * qla2x00_module_exit - Module cleanup.
3512  **/
3513 static void __exit
3514 qla2x00_module_exit(void)
3515 {
3516         pci_unregister_driver(&qla2xxx_pci_driver);
3517         qla2x00_release_firmware();
3518         kmem_cache_destroy(srb_cachep);
3519         fc_release_transport(qla2xxx_transport_template);
3520         fc_release_transport(qla2xxx_transport_vport_template);
3521 }
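
/*
 * Teardown mirrors qla2x00_module_init() in reverse: unregister the PCI
 * driver first so no new probes race the cleanup, release any firmware
 * images cached by qla2x00_request_firmware(), then destroy the SRB cache
 * and drop the two FC transport templates.
 */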
3522
3523 module_init(qla2x00_module_init);
3524 module_exit(qla2x00_module_exit);
3525
3526 MODULE_AUTHOR("QLogic Corporation");
3527 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
3528 MODULE_LICENSE("GPL");
3529 MODULE_VERSION(QLA2XXX_VERSION);
3530 MODULE_FIRMWARE(FW_FILE_ISP21XX);
3531 MODULE_FIRMWARE(FW_FILE_ISP22XX);
3532 MODULE_FIRMWARE(FW_FILE_ISP2300);
3533 MODULE_FIRMWARE(FW_FILE_ISP2322);
3534 MODULE_FIRMWARE(FW_FILE_ISP24XX);
3535 MODULE_FIRMWARE(FW_FILE_ISP25XX);