/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
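/*
 * The trailing unnamed initializer in each entry fills in the chip's
 * interrupt register offset table declared in ipr.h.
 */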
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

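/*
 * Supported adapters: each entry ties a PCI vendor/device ID pair to its
 * interrupt mode (IPR_USE_LSI or IPR_USE_MSI), SIS interface level,
 * config access method, and register layout from ipr_chip_cfg[] above.
 */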
static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

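/* Indexed by the max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */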
static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/* A constant array of IOASCs/URCs/Error Messages */
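/*
 * Each entry pairs an IOASC with its logging controls (a flag selecting
 * whether the full IOASA is dumped, and the log level at which the event
 * is reported) and the message text printed for it.
 */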
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

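/*
 * Known SES backplanes: each entry gives an inquiry product ID, a per-byte
 * compare mask (an 'X' requires that byte of the product ID to match; any
 * other character, e.g. '*', is treated as a wildcard), and the highest
 * supported bus speed in MB/s.
 */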
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

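        /*
         * Atomically advance the index; the modulo wrap makes the trace a
         * circular buffer of IPR_NUM_TRACE_ENTRIES entries.
         */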
        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

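        /* Preserve the command's HRRQ assignment across the packet reset */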
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
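        /* Order the allow_interrupts updates before the MMIO mask writes */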
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, it calculates the IOARCB size required and ORs
 * the appropriate low-order bits into the IOARCB address.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 * @done:       done function
 * @timeout_func:       timeout function
 * @timeout:    timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      none
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

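/**
 * ipr_get_hrrq_index - Select an HRR queue for the next command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins over queues 1 through hrrq_num - 1, leaving queue 0
 * (IPR_INIT_HRRQ) for driver-initiated commands. With a single queue,
 * index 0 is returned.
 *
 * Return value:
 *      hrrq index
 **/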
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

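                /*
                 * Pick a target ID: generic SCSI devices that share a
                 * dev_id reuse the same target, while arrays and volume
                 * sets are given IDs on their own virtual buses.
                 */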
                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
                                 u8 *res_path, char *buffer, int len)
{
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
        __ipr_format_res_path(res_path, p, len - (p - buffer));
        return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
                                 struct ipr_config_table_entry_wrapper *cfgtew)
{
        char buffer[IPR_MAX_RES_PATH_LENGTH];
        unsigned int proto;
        int new_path = 0;

        if (res->ioa_cfg->sis64) {
                res->flags = cfgtew->u.cfgte64->flags;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL64(res);
                proto = cfgtew->u.cfgte64->proto;
                res->res_handle = cfgtew->u.cfgte64->res_handle;
                res->dev_id = cfgtew->u.cfgte64->dev_id;

                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));

                if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
                                        sizeof(res->res_path))) {
                        memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                                sizeof(res->res_path));
                        new_path = 1;
                }

                if (res->sdev && new_path)
                        sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
                                    ipr_format_res_path(res->ioa_cfg,
                                        res->res_path, buffer, sizeof(buffer)));
        } else {
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
                        sizeof(struct ipr_std_inq_data));

                res->qmodel = IPR_QUEUEING_MODEL(res);
                proto = cfgtew->u.cfgte->proto;
                res->res_handle = cfgtew->u.cfgte->res_handle;
        }

        ipr_update_ata_class(res, proto);
}

1344 /**
1345  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1346  *                        for the resource.
1347  * @res:        resource entry struct
1349  *
1350  * Return value:
1351  *      none
1352  **/
1353 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1354 {
1355         struct ipr_resource_entry *gscsi_res = NULL;
1356         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1357
1358         if (!ioa_cfg->sis64)
1359                 return;
1360
1361         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1362                 clear_bit(res->target, ioa_cfg->array_ids);
1363         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1364                 clear_bit(res->target, ioa_cfg->vset_ids);
1365         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1366                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1367                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1368                                 return;
1369                 clear_bit(res->target, ioa_cfg->target_ids);
1370
1371         } else if (res->bus == 0)
1372                 clear_bit(res->target, ioa_cfg->target_ids);
1373 }
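
/*
 * Illustrative sketch (hypothetical example_* names): the target-id
 * bookkeeping above is a plain bitmap allocator. Freeing a target
 * clears its bit so a later hot add can reuse the id; a non-atomic
 * model of the clear_bit() calls used above is:
 */
static inline void example_clear_target(unsigned long *ids, int target)
{
        /* Clear bit 'target' in the id bitmap. */
        ids[target / BITS_PER_LONG] &= ~(1UL << (target % BITS_PER_LONG));
}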
1374
1375 /**
1376  * ipr_handle_config_change - Handle a config change from the adapter
1377  * @ioa_cfg:    ioa config struct
1378  * @hostrcb:    hostrcb
1379  *
1380  * Return value:
1381  *      none
1382  **/
1383 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1384                                      struct ipr_hostrcb *hostrcb)
1385 {
1386         struct ipr_resource_entry *res = NULL;
1387         struct ipr_config_table_entry_wrapper cfgtew;
1388         __be32 cc_res_handle;
1389
1390         u32 is_ndn = 1;
1391
1392         if (ioa_cfg->sis64) {
1393                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1394                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1395         } else {
1396                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1397                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1398         }
1399
1400         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1401                 if (res->res_handle == cc_res_handle) {
1402                         is_ndn = 0;
1403                         break;
1404                 }
1405         }
1406
1407         if (is_ndn) {
1408                 if (list_empty(&ioa_cfg->free_res_q)) {
1409                         ipr_send_hcam(ioa_cfg,
1410                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1411                                       hostrcb);
1412                         return;
1413                 }
1414
1415                 res = list_entry(ioa_cfg->free_res_q.next,
1416                                  struct ipr_resource_entry, queue);
1417
1418                 list_del(&res->queue);
1419                 ipr_init_res_entry(res, &cfgtew);
1420                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1421         }
1422
1423         ipr_update_res_entry(res, &cfgtew);
1424
1425         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1426                 if (res->sdev) {
1427                         res->del_from_ml = 1;
1428                         res->res_handle = IPR_INVALID_RES_HANDLE;
1429                         if (ioa_cfg->allow_ml_add_del)
1430                                 schedule_work(&ioa_cfg->work_q);
1431                 } else {
1432                         ipr_clear_res_target(res);
1433                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434                 }
1435         } else if (!res->sdev || res->del_from_ml) {
1436                 res->add_to_ml = 1;
1437                 if (ioa_cfg->allow_ml_add_del)
1438                         schedule_work(&ioa_cfg->work_q);
1439         }
1440
1441         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1442 }
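
/*
 * Illustrative note: resource entries are managed with the usual
 * free-list pattern - a previously unseen handle takes an entry from
 * free_res_q and moves it to used_res_q, and a removal notification
 * moves it back once the mid-layer device is gone.
 */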
1443
1444 /**
1445  * ipr_process_ccn - Op done function for a CCN.
1446  * @ipr_cmd:    ipr command struct
1447  *
1448  * This function is the op done function for a configuration
1449  * change notification host controlled async message (HCAM) from the adapter.
1450  *
1451  * Return value:
1452  *      none
1453  **/
1454 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1455 {
1456         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1457         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1458         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1459
1460         list_del(&hostrcb->queue);
1461         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1462
1463         if (ioasc) {
1464                 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1465                         dev_err(&ioa_cfg->pdev->dev,
1466                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1467
1468                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1469         } else {
1470                 ipr_handle_config_change(ioa_cfg, hostrcb);
1471         }
1472 }
1473
1474 /**
1475  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1476  * @i:          index into buffer
1477  * @buf:                string to modify
1478  *
1479  * This function will strip all trailing whitespace, pad the end
1480  * of the string with a single space, and NULL terminate the string.
1481  *
1482  * Return value:
1483  *      new length of string
1484  **/
1485 static int strip_and_pad_whitespace(int i, char *buf)
1486 {
1487         while (i && buf[i] == ' ')
1488                 i--;
1489         buf[i+1] = ' ';
1490         buf[i+2] = '\0';
1491         return i + 2;
1492 }
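
/*
 * Worked example, illustrative only: with buf = "IBM     " and i = 7
 * (the last byte of the field), the loop backs up over the trailing
 * blanks to the 'M' at index 2, writes one pad blank at index 3 and a
 * terminating NUL at index 4, and returns 4 - the offset at which the
 * caller copies in the next field.
 */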
1493
1494 /**
1495  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1496  * @prefix:             string to print at start of printk
1497  * @hostrcb:    hostrcb pointer
1498  * @vpd:                vendor/product id/sn struct
1499  *
1500  * Return value:
1501  *      none
1502  **/
1503 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1504                                 struct ipr_vpd *vpd)
1505 {
1506         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1507         int i = 0;
1508
1509         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1510         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1511
1512         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1513         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1514
1515         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1516         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1517
1518         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1519 }
1520
1521 /**
1522  * ipr_log_vpd - Log the passed VPD to the error log.
1523  * @vpd:                vendor/product id/sn struct
1524  *
1525  * Return value:
1526  *      none
1527  **/
1528 static void ipr_log_vpd(struct ipr_vpd *vpd)
1529 {
1530         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1531                     + IPR_SERIAL_NUM_LEN];
1532
1533         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1534         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1535                IPR_PROD_ID_LEN);
1536         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1537         ipr_err("Vendor/Product ID: %s\n", buffer);
1538
1539         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1540         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1541         ipr_err("    Serial Number: %s\n", buffer);
1542 }
1543
1544 /**
1545  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1546  * @prefix:             string to print at start of printk
1547  * @hostrcb:    hostrcb pointer
1548  * @vpd:                vendor/product id/sn/wwn struct
1549  *
1550  * Return value:
1551  *      none
1552  **/
1553 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1554                                     struct ipr_ext_vpd *vpd)
1555 {
1556         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1557         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1558                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1559 }
1560
1561 /**
1562  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1563  * @vpd:                vendor/product id/sn/wwn struct
1564  *
1565  * Return value:
1566  *      none
1567  **/
1568 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1569 {
1570         ipr_log_vpd(&vpd->vpd);
1571         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1572                 be32_to_cpu(vpd->wwid[1]));
1573 }
1574
1575 /**
1576  * ipr_log_enhanced_cache_error - Log a cache error.
1577  * @ioa_cfg:    ioa config struct
1578  * @hostrcb:    hostrcb struct
1579  *
1580  * Return value:
1581  *      none
1582  **/
1583 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1584                                          struct ipr_hostrcb *hostrcb)
1585 {
1586         struct ipr_hostrcb_type_12_error *error;
1587
1588         if (ioa_cfg->sis64)
1589                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1590         else
1591                 error = &hostrcb->hcam.u.error.u.type_12_error;
1592
1593         ipr_err("-----Current Configuration-----\n");
1594         ipr_err("Cache Directory Card Information:\n");
1595         ipr_log_ext_vpd(&error->ioa_vpd);
1596         ipr_err("Adapter Card Information:\n");
1597         ipr_log_ext_vpd(&error->cfc_vpd);
1598
1599         ipr_err("-----Expected Configuration-----\n");
1600         ipr_err("Cache Directory Card Information:\n");
1601         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1602         ipr_err("Adapter Card Information:\n");
1603         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1604
1605         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1606                      be32_to_cpu(error->ioa_data[0]),
1607                      be32_to_cpu(error->ioa_data[1]),
1608                      be32_to_cpu(error->ioa_data[2]));
1609 }
1610
1611 /**
1612  * ipr_log_cache_error - Log a cache error.
1613  * @ioa_cfg:    ioa config struct
1614  * @hostrcb:    hostrcb struct
1615  *
1616  * Return value:
1617  *      none
1618  **/
1619 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1620                                 struct ipr_hostrcb *hostrcb)
1621 {
1622         struct ipr_hostrcb_type_02_error *error =
1623                 &hostrcb->hcam.u.error.u.type_02_error;
1624
1625         ipr_err("-----Current Configuration-----\n");
1626         ipr_err("Cache Directory Card Information:\n");
1627         ipr_log_vpd(&error->ioa_vpd);
1628         ipr_err("Adapter Card Information:\n");
1629         ipr_log_vpd(&error->cfc_vpd);
1630
1631         ipr_err("-----Expected Configuration-----\n");
1632         ipr_err("Cache Directory Card Information:\n");
1633         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1634         ipr_err("Adapter Card Information:\n");
1635         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1636
1637         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1638                      be32_to_cpu(error->ioa_data[0]),
1639                      be32_to_cpu(error->ioa_data[1]),
1640                      be32_to_cpu(error->ioa_data[2]));
1641 }
1642
1643 /**
1644  * ipr_log_enhanced_config_error - Log a configuration error.
1645  * @ioa_cfg:    ioa config struct
1646  * @hostrcb:    hostrcb struct
1647  *
1648  * Return value:
1649  *      none
1650  **/
1651 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1652                                           struct ipr_hostrcb *hostrcb)
1653 {
1654         int errors_logged, i;
1655         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1656         struct ipr_hostrcb_type_13_error *error;
1657
1658         error = &hostrcb->hcam.u.error.u.type_13_error;
1659         errors_logged = be32_to_cpu(error->errors_logged);
1660
1661         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1662                 be32_to_cpu(error->errors_detected), errors_logged);
1663
1664         dev_entry = error->dev;
1665
1666         for (i = 0; i < errors_logged; i++, dev_entry++) {
1667                 ipr_err_separator;
1668
1669                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1670                 ipr_log_ext_vpd(&dev_entry->vpd);
1671
1672                 ipr_err("-----New Device Information-----\n");
1673                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1674
1675                 ipr_err("Cache Directory Card Information:\n");
1676                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677
1678                 ipr_err("Adapter Card Information:\n");
1679                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680         }
1681 }
1682
1683 /**
1684  * ipr_log_sis64_config_error - Log a device error.
1685  * @ioa_cfg:    ioa config struct
1686  * @hostrcb:    hostrcb struct
1687  *
1688  * Return value:
1689  *      none
1690  **/
1691 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692                                        struct ipr_hostrcb *hostrcb)
1693 {
1694         int errors_logged, i;
1695         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1696         struct ipr_hostrcb_type_23_error *error;
1697         char buffer[IPR_MAX_RES_PATH_LENGTH];
1698
1699         error = &hostrcb->hcam.u.error64.u.type_23_error;
1700         errors_logged = be32_to_cpu(error->errors_logged);
1701
1702         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1703                 be32_to_cpu(error->errors_detected), errors_logged);
1704
1705         dev_entry = error->dev;
1706
1707         for (i = 0; i < errors_logged; i++, dev_entry++) {
1708                 ipr_err_separator;
1709
1710                 ipr_err("Device %d : %s", i + 1,
1711                         __ipr_format_res_path(dev_entry->res_path,
1712                                               buffer, sizeof(buffer)));
1713                 ipr_log_ext_vpd(&dev_entry->vpd);
1714
1715                 ipr_err("-----New Device Information-----\n");
1716                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1717
1718                 ipr_err("Cache Directory Card Information:\n");
1719                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1720
1721                 ipr_err("Adapter Card Information:\n");
1722                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1723         }
1724 }
1725
1726 /**
1727  * ipr_log_config_error - Log a configuration error.
1728  * @ioa_cfg:    ioa config struct
1729  * @hostrcb:    hostrcb struct
1730  *
1731  * Return value:
1732  *      none
1733  **/
1734 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1735                                  struct ipr_hostrcb *hostrcb)
1736 {
1737         int errors_logged, i;
1738         struct ipr_hostrcb_device_data_entry *dev_entry;
1739         struct ipr_hostrcb_type_03_error *error;
1740
1741         error = &hostrcb->hcam.u.error.u.type_03_error;
1742         errors_logged = be32_to_cpu(error->errors_logged);
1743
1744         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1745                 be32_to_cpu(error->errors_detected), errors_logged);
1746
1747         dev_entry = error->dev;
1748
1749         for (i = 0; i < errors_logged; i++, dev_entry++) {
1750                 ipr_err_separator;
1751
1752                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1753                 ipr_log_vpd(&dev_entry->vpd);
1754
1755                 ipr_err("-----New Device Information-----\n");
1756                 ipr_log_vpd(&dev_entry->new_vpd);
1757
1758                 ipr_err("Cache Directory Card Information:\n");
1759                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1760
1761                 ipr_err("Adapter Card Information:\n");
1762                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1763
1764                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1765                         be32_to_cpu(dev_entry->ioa_data[0]),
1766                         be32_to_cpu(dev_entry->ioa_data[1]),
1767                         be32_to_cpu(dev_entry->ioa_data[2]),
1768                         be32_to_cpu(dev_entry->ioa_data[3]),
1769                         be32_to_cpu(dev_entry->ioa_data[4]));
1770         }
1771 }
1772
1773 /**
1774  * ipr_log_enhanced_array_error - Log an array configuration error.
1775  * @ioa_cfg:    ioa config struct
1776  * @hostrcb:    hostrcb struct
1777  *
1778  * Return value:
1779  *      none
1780  **/
1781 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1782                                          struct ipr_hostrcb *hostrcb)
1783 {
1784         int i, num_entries;
1785         struct ipr_hostrcb_type_14_error *error;
1786         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1787         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1788
1789         error = &hostrcb->hcam.u.error.u.type_14_error;
1790
1791         ipr_err_separator;
1792
1793         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1794                 error->protection_level,
1795                 ioa_cfg->host->host_no,
1796                 error->last_func_vset_res_addr.bus,
1797                 error->last_func_vset_res_addr.target,
1798                 error->last_func_vset_res_addr.lun);
1799
1800         ipr_err_separator;
1801
1802         array_entry = error->array_member;
1803         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1804                             ARRAY_SIZE(error->array_member));
1805
1806         for (i = 0; i < num_entries; i++, array_entry++) {
1807                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1808                         continue;
1809
1810                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1811                         ipr_err("Exposed Array Member %d:\n", i);
1812                 else
1813                         ipr_err("Array Member %d:\n", i);
1814
1815                 ipr_log_ext_vpd(&array_entry->vpd);
1816                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1817                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1818                                  "Expected Location");
1819
1820                 ipr_err_separator;
1821         }
1822 }
1823
1824 /**
1825  * ipr_log_array_error - Log an array configuration error.
1826  * @ioa_cfg:    ioa config struct
1827  * @hostrcb:    hostrcb struct
1828  *
1829  * Return value:
1830  *      none
1831  **/
1832 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1833                                 struct ipr_hostrcb *hostrcb)
1834 {
1835         int i;
1836         struct ipr_hostrcb_type_04_error *error;
1837         struct ipr_hostrcb_array_data_entry *array_entry;
1838         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1839
1840         error = &hostrcb->hcam.u.error.u.type_04_error;
1841
1842         ipr_err_separator;
1843
1844         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1845                 error->protection_level,
1846                 ioa_cfg->host->host_no,
1847                 error->last_func_vset_res_addr.bus,
1848                 error->last_func_vset_res_addr.target,
1849                 error->last_func_vset_res_addr.lun);
1850
1851         ipr_err_separator;
1852
1853         array_entry = error->array_member;
1854
1855         for (i = 0; i < 18; i++) {
1856                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1857                         continue;
1858
1859                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1860                         ipr_err("Exposed Array Member %d:\n", i);
1861                 else
1862                         ipr_err("Array Member %d:\n", i);
1863
1864                 ipr_log_vpd(&array_entry->vpd);
1865
1866                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1867                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1868                                  "Expected Location");
1869
1870                 ipr_err_separator;
1871
1872                 if (i == 9)
1873                         array_entry = error->array_member2;
1874                 else
1875                         array_entry++;
1876         }
1877 }
1878
1879 /**
1880  * ipr_log_hex_data - Log additional hex IOA error data.
1881  * @ioa_cfg:    ioa config struct
1882  * @data:               IOA error data
1883  * @len:                data length
1884  *
1885  * Return value:
1886  *      none
1887  **/
1888 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1889 {
1890         int i;
1891
1892         if (len == 0)
1893                 return;
1894
1895         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1896                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1897
1898         for (i = 0; i < len / 4; i += 4) {
1899                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1900                         be32_to_cpu(data[i]),
1901                         be32_to_cpu(data[i+1]),
1902                         be32_to_cpu(data[i+2]),
1903                         be32_to_cpu(data[i+3]));
1904         }
1905 }
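
/*
 * Example output, illustrative only: a 32-byte buffer is logged as two
 * lines of four 32-bit words, each prefixed with its byte offset:
 *
 *      00000000: DEADBEEF 00000001 00000002 00000003
 *      00000010: 00000004 00000005 00000006 00000007
 *
 * The index i counts words and advances by four per line, so the
 * printed offset is i*4 bytes.
 */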
1906
1907 /**
1908  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1909  * @ioa_cfg:    ioa config struct
1910  * @hostrcb:    hostrcb struct
1911  *
1912  * Return value:
1913  *      none
1914  **/
1915 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1916                                             struct ipr_hostrcb *hostrcb)
1917 {
1918         struct ipr_hostrcb_type_17_error *error;
1919
1920         if (ioa_cfg->sis64)
1921                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1922         else
1923                 error = &hostrcb->hcam.u.error.u.type_17_error;
1924
1925         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1926         strim(error->failure_reason);
1927
1928         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1929                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1930         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1931         ipr_log_hex_data(ioa_cfg, error->data,
1932                          be32_to_cpu(hostrcb->hcam.length) -
1933                          (offsetof(struct ipr_hostrcb_error, u) +
1934                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1935 }
1936
1937 /**
1938  * ipr_log_dual_ioa_error - Log a dual adapter error.
1939  * @ioa_cfg:    ioa config struct
1940  * @hostrcb:    hostrcb struct
1941  *
1942  * Return value:
1943  *      none
1944  **/
1945 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1946                                    struct ipr_hostrcb *hostrcb)
1947 {
1948         struct ipr_hostrcb_type_07_error *error;
1949
1950         error = &hostrcb->hcam.u.error.u.type_07_error;
1951         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1952         strim(error->failure_reason);
1953
1954         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1955                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1956         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1957         ipr_log_hex_data(ioa_cfg, error->data,
1958                          be32_to_cpu(hostrcb->hcam.length) -
1959                          (offsetof(struct ipr_hostrcb_error, u) +
1960                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1961 }
1962
1963 static const struct {
1964         u8 active;
1965         char *desc;
1966 } path_active_desc[] = {
1967         { IPR_PATH_NO_INFO, "Path" },
1968         { IPR_PATH_ACTIVE, "Active path" },
1969         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1970 };
1971
1972 static const struct {
1973         u8 state;
1974         char *desc;
1975 } path_state_desc[] = {
1976         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1977         { IPR_PATH_HEALTHY, "is healthy" },
1978         { IPR_PATH_DEGRADED, "is degraded" },
1979         { IPR_PATH_FAILED, "is failed" }
1980 };
1981
1982 /**
1983  * ipr_log_fabric_path - Log a fabric path error
1984  * @hostrcb:    hostrcb struct
1985  * @fabric:             fabric descriptor
1986  *
1987  * Return value:
1988  *      none
1989  **/
1990 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1991                                 struct ipr_hostrcb_fabric_desc *fabric)
1992 {
1993         int i, j;
1994         u8 path_state = fabric->path_state;
1995         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1996         u8 state = path_state & IPR_PATH_STATE_MASK;
1997
1998         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1999                 if (path_active_desc[i].active != active)
2000                         continue;
2001
2002                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2003                         if (path_state_desc[j].state != state)
2004                                 continue;
2005
2006                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2007                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2008                                              path_active_desc[i].desc, path_state_desc[j].desc,
2009                                              fabric->ioa_port);
2010                         } else if (fabric->cascaded_expander == 0xff) {
2011                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2012                                              path_active_desc[i].desc, path_state_desc[j].desc,
2013                                              fabric->ioa_port, fabric->phy);
2014                         } else if (fabric->phy == 0xff) {
2015                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2016                                              path_active_desc[i].desc, path_state_desc[j].desc,
2017                                              fabric->ioa_port, fabric->cascaded_expander);
2018                         } else {
2019                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2020                                              path_active_desc[i].desc, path_state_desc[j].desc,
2021                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2022                         }
2023                         return;
2024                 }
2025         }
2026
2027         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2028                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2029 }
2030
2031 /**
2032  * ipr_log64_fabric_path - Log a fabric path error
2033  * @hostrcb:    hostrcb struct
2034  * @fabric:             fabric descriptor
2035  *
2036  * Return value:
2037  *      none
2038  **/
2039 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2040                                   struct ipr_hostrcb64_fabric_desc *fabric)
2041 {
2042         int i, j;
2043         u8 path_state = fabric->path_state;
2044         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2045         u8 state = path_state & IPR_PATH_STATE_MASK;
2046         char buffer[IPR_MAX_RES_PATH_LENGTH];
2047
2048         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2049                 if (path_active_desc[i].active != active)
2050                         continue;
2051
2052                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2053                         if (path_state_desc[j].state != state)
2054                                 continue;
2055
2056                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2057                                      path_active_desc[i].desc, path_state_desc[j].desc,
2058                                      ipr_format_res_path(hostrcb->ioa_cfg,
2059                                                 fabric->res_path,
2060                                                 buffer, sizeof(buffer)));
2061                         return;
2062                 }
2063         }
2064
2065         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2066                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2067                                     buffer, sizeof(buffer)));
2068 }
2069
2070 static const struct {
2071         u8 type;
2072         char *desc;
2073 } path_type_desc[] = {
2074         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2075         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2076         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2077         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2078 };
2079
2080 static const struct {
2081         u8 status;
2082         char *desc;
2083 } path_status_desc[] = {
2084         { IPR_PATH_CFG_NO_PROB, "Functional" },
2085         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2086         { IPR_PATH_CFG_FAILED, "Failed" },
2087         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2088         { IPR_PATH_NOT_DETECTED, "Missing" },
2089         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2090 };
2091
2092 static const char *link_rate[] = {
2093         "unknown",
2094         "disabled",
2095         "phy reset problem",
2096         "spinup hold",
2097         "port selector",
2098         "unknown",
2099         "unknown",
2100         "unknown",
2101         "1.5Gbps",
2102         "3.0Gbps",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "unknown",
2107         "unknown",
2108         "unknown"
2109 };
2110
2111 /**
2112  * ipr_log_path_elem - Log a fabric path element.
2113  * @hostrcb:    hostrcb struct
2114  * @cfg:                fabric path element struct
2115  *
2116  * Return value:
2117  *      none
2118  **/
2119 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2120                               struct ipr_hostrcb_config_element *cfg)
2121 {
2122         int i, j;
2123         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2124         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2125
2126         if (type == IPR_PATH_CFG_NOT_EXIST)
2127                 return;
2128
2129         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2130                 if (path_type_desc[i].type != type)
2131                         continue;
2132
2133                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2134                         if (path_status_desc[j].status != status)
2135                                 continue;
2136
2137                         if (type == IPR_PATH_CFG_IOA_PORT) {
2138                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2139                                              path_status_desc[j].desc, path_type_desc[i].desc,
2140                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2141                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2142                         } else {
2143                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2144                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2145                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2146                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2147                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2148                                 } else if (cfg->cascaded_expander == 0xff) {
2149                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2150                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2151                                                      path_type_desc[i].desc, cfg->phy,
2152                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2153                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2154                                 } else if (cfg->phy == 0xff) {
2155                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2156                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2157                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2158                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160                                 } else {
2161                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s, "
2162                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2163                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2164                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166                                 }
2167                         }
2168                         return;
2169                 }
2170         }
2171
2172         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2173                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2174                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176 }
2177
2178 /**
2179  * ipr_log64_path_elem - Log a fabric path element.
2180  * @hostrcb:    hostrcb struct
2181  * @cfg:                fabric path element struct
2182  *
2183  * Return value:
2184  *      none
2185  **/
2186 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2187                                 struct ipr_hostrcb64_config_element *cfg)
2188 {
2189         int i, j;
2190         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2191         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2192         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2193         char buffer[IPR_MAX_RES_PATH_LENGTH];
2194
2195         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2196                 return;
2197
2198         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2199                 if (path_type_desc[i].type != type)
2200                         continue;
2201
2202                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2203                         if (path_status_desc[j].status != status)
2204                                 continue;
2205
2206                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2207                                      path_status_desc[j].desc, path_type_desc[i].desc,
2208                                      ipr_format_res_path(hostrcb->ioa_cfg,
2209                                         cfg->res_path, buffer, sizeof(buffer)),
2210                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211                                         be32_to_cpu(cfg->wwid[0]),
2212                                         be32_to_cpu(cfg->wwid[1]));
2213                         return;
2214                 }
2215         }
2216         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2217                      "WWN=%08X%08X\n", cfg->type_status,
2218                      ipr_format_res_path(hostrcb->ioa_cfg,
2219                         cfg->res_path, buffer, sizeof(buffer)),
2220                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2221                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2222 }
2223
2224 /**
2225  * ipr_log_fabric_error - Log a fabric error.
2226  * @ioa_cfg:    ioa config struct
2227  * @hostrcb:    hostrcb struct
2228  *
2229  * Return value:
2230  *      none
2231  **/
2232 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2233                                  struct ipr_hostrcb *hostrcb)
2234 {
2235         struct ipr_hostrcb_type_20_error *error;
2236         struct ipr_hostrcb_fabric_desc *fabric;
2237         struct ipr_hostrcb_config_element *cfg;
2238         int i, add_len;
2239
2240         error = &hostrcb->hcam.u.error.u.type_20_error;
2241         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2242         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2243
2244         add_len = be32_to_cpu(hostrcb->hcam.length) -
2245                 (offsetof(struct ipr_hostrcb_error, u) +
2246                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2247
2248         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2249                 ipr_log_fabric_path(hostrcb, fabric);
2250                 for_each_fabric_cfg(fabric, cfg)
2251                         ipr_log_path_elem(hostrcb, cfg);
2252
2253                 add_len -= be16_to_cpu(fabric->length);
2254                 fabric = (struct ipr_hostrcb_fabric_desc *)
2255                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2256         }
2257
2258         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2259 }
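
/*
 * Illustrative note: fabric descriptors are variable length, so the
 * walk above advances by each descriptor's own length field; whatever
 * tail remains after the last descriptor is dumped raw as hex.
 */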
2260
2261 /**
2262  * ipr_log_sis64_array_error - Log a sis64 array error.
2263  * @ioa_cfg:    ioa config struct
2264  * @hostrcb:    hostrcb struct
2265  *
2266  * Return value:
2267  *      none
2268  **/
2269 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2270                                       struct ipr_hostrcb *hostrcb)
2271 {
2272         int i, num_entries;
2273         struct ipr_hostrcb_type_24_error *error;
2274         struct ipr_hostrcb64_array_data_entry *array_entry;
2275         char buffer[IPR_MAX_RES_PATH_LENGTH];
2276         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2277
2278         error = &hostrcb->hcam.u.error64.u.type_24_error;
2279
2280         ipr_err_separator;
2281
2282         ipr_err("RAID %s Array Configuration: %s\n",
2283                 error->protection_level,
2284                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2285                         buffer, sizeof(buffer)));
2286
2287         ipr_err_separator;
2288
2289         array_entry = error->array_member;
2290         num_entries = min_t(u32, error->num_entries,
2291                             ARRAY_SIZE(error->array_member));
2292
2293         for (i = 0; i < num_entries; i++, array_entry++) {
2294
2295                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2296                         continue;
2297
2298                 if (error->exposed_mode_adn == i)
2299                         ipr_err("Exposed Array Member %d:\n", i);
2300                 else
2301                         ipr_err("Array Member %d:\n", i);
2302
2304                 ipr_log_ext_vpd(&array_entry->vpd);
2305                 ipr_err("Current Location: %s\n",
2306                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2307                                 buffer, sizeof(buffer)));
2308                 ipr_err("Expected Location: %s\n",
2309                          ipr_format_res_path(ioa_cfg,
2310                                 array_entry->expected_res_path,
2311                                 buffer, sizeof(buffer)));
2312
2313                 ipr_err_separator;
2314         }
2315 }
2316
2317 /**
2318  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2319  * @ioa_cfg:    ioa config struct
2320  * @hostrcb:    hostrcb struct
2321  *
2322  * Return value:
2323  *      none
2324  **/
2325 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2326                                        struct ipr_hostrcb *hostrcb)
2327 {
2328         struct ipr_hostrcb_type_30_error *error;
2329         struct ipr_hostrcb64_fabric_desc *fabric;
2330         struct ipr_hostrcb64_config_element *cfg;
2331         int i, add_len;
2332
2333         error = &hostrcb->hcam.u.error64.u.type_30_error;
2334
2335         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2336         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2337
2338         add_len = be32_to_cpu(hostrcb->hcam.length) -
2339                 (offsetof(struct ipr_hostrcb64_error, u) +
2340                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2341
2342         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2343                 ipr_log64_fabric_path(hostrcb, fabric);
2344                 for_each_fabric_cfg(fabric, cfg)
2345                         ipr_log64_path_elem(hostrcb, cfg);
2346
2347                 add_len -= be16_to_cpu(fabric->length);
2348                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2349                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2350         }
2351
2352         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2353 }
2354
2355 /**
2356  * ipr_log_generic_error - Log an adapter error.
2357  * @ioa_cfg:    ioa config struct
2358  * @hostrcb:    hostrcb struct
2359  *
2360  * Return value:
2361  *      none
2362  **/
2363 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2364                                   struct ipr_hostrcb *hostrcb)
2365 {
2366         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2367                          be32_to_cpu(hostrcb->hcam.length));
2368 }
2369
2370 /**
2371  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2372  * @ioasc:      IOASC
2373  *
2374  * This function will return the index into the ipr_error_table
2375  * for the specified IOASC. If the IOASC is not in the table,
2376  * 0 will be returned, which points to the entry used for unknown errors.
2377  *
2378  * Return value:
2379  *      index into the ipr_error_table
2380  **/
2381 static u32 ipr_get_error(u32 ioasc)
2382 {
2383         int i;
2384
2385         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2386                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2387                         return i;
2388
2389         return 0;
2390 }
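
/*
 * Illustrative sketch (hypothetical example_* names): the lookup masks
 * the IOASC down to the bits the table keys on before comparing, and
 * index 0 doubles as the "unknown error" fallback:
 */
struct example_error_entry {
        u32 ioasc;
        const char *error;
};

static const struct example_error_entry example_error_table[] = {
        { 0x00000000, "Unknown error" },        /* index 0: fallback */
        { 0x01080000, "Example IOASC" },        /* hypothetical entry */
};

static u32 example_get_error(u32 ioasc, u32 mask)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(example_error_table); i++)
                if (example_error_table[i].ioasc == (ioasc & mask))
                        return i;

        return 0;       /* not found: report as unknown */
}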
2391
2392 /**
2393  * ipr_handle_log_data - Log an adapter error.
2394  * @ioa_cfg:    ioa config struct
2395  * @hostrcb:    hostrcb struct
2396  *
2397  * This function logs an adapter error to the system.
2398  *
2399  * Return value:
2400  *      none
2401  **/
2402 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2403                                 struct ipr_hostrcb *hostrcb)
2404 {
2405         u32 ioasc;
2406         int error_index;
2407
2408         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2409                 return;
2410
2411         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2412                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2413
2414         if (ioa_cfg->sis64)
2415                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2416         else
2417                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2418
2419         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2420             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2421                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2422                 scsi_report_bus_reset(ioa_cfg->host,
2423                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2424         }
2425
2426         error_index = ipr_get_error(ioasc);
2427
2428         if (!ipr_error_table[error_index].log_hcam)
2429                 return;
2430
2431         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2432
2433         /* Set indication we have logged an error */
2434         ioa_cfg->errors_logged++;
2435
2436         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2437                 return;
2438         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2439                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2440
2441         switch (hostrcb->hcam.overlay_id) {
2442         case IPR_HOST_RCB_OVERLAY_ID_2:
2443                 ipr_log_cache_error(ioa_cfg, hostrcb);
2444                 break;
2445         case IPR_HOST_RCB_OVERLAY_ID_3:
2446                 ipr_log_config_error(ioa_cfg, hostrcb);
2447                 break;
2448         case IPR_HOST_RCB_OVERLAY_ID_4:
2449         case IPR_HOST_RCB_OVERLAY_ID_6:
2450                 ipr_log_array_error(ioa_cfg, hostrcb);
2451                 break;
2452         case IPR_HOST_RCB_OVERLAY_ID_7:
2453                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2454                 break;
2455         case IPR_HOST_RCB_OVERLAY_ID_12:
2456                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2457                 break;
2458         case IPR_HOST_RCB_OVERLAY_ID_13:
2459                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2460                 break;
2461         case IPR_HOST_RCB_OVERLAY_ID_14:
2462         case IPR_HOST_RCB_OVERLAY_ID_16:
2463                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2464                 break;
2465         case IPR_HOST_RCB_OVERLAY_ID_17:
2466                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2467                 break;
2468         case IPR_HOST_RCB_OVERLAY_ID_20:
2469                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2470                 break;
2471         case IPR_HOST_RCB_OVERLAY_ID_23:
2472                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2473                 break;
2474         case IPR_HOST_RCB_OVERLAY_ID_24:
2475         case IPR_HOST_RCB_OVERLAY_ID_26:
2476                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2477                 break;
2478         case IPR_HOST_RCB_OVERLAY_ID_30:
2479                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2480                 break;
2481         case IPR_HOST_RCB_OVERLAY_ID_1:
2482         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2483         default:
2484                 ipr_log_generic_error(ioa_cfg, hostrcb);
2485                 break;
2486         }
2487 }
2488
2489 /**
2490  * ipr_process_error - Op done function for an adapter error log.
2491  * @ipr_cmd:    ipr command struct
2492  *
2493  * This function is the op done function for an error log host
2494  * controlled async message (HCAM) from the adapter. It will log the error and
2495  * send the HCAM back to the adapter.
2496  *
2497  * Return value:
2498  *      none
2499  **/
2500 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2501 {
2502         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2503         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2504         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2505         u32 fd_ioasc;
2506
2507         if (ioa_cfg->sis64)
2508                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2509         else
2510                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2511
2512         list_del(&hostrcb->queue);
2513         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2514
2515         if (!ioasc) {
2516                 ipr_handle_log_data(ioa_cfg, hostrcb);
2517                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2518                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2519         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2520                 dev_err(&ioa_cfg->pdev->dev,
2521                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2522         }
2523
2524         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2525 }
2526
2527 /**
2528  * ipr_timeout -  An internally generated op has timed out.
2529  * @ipr_cmd:    ipr command struct
2530  *
2531  * This function blocks host requests and initiates an
2532  * adapter reset.
2533  *
2534  * Return value:
2535  *      none
2536  **/
2537 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2538 {
2539         unsigned long lock_flags = 0;
2540         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2541
2542         ENTER;
2543         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2544
2545         ioa_cfg->errors_logged++;
2546         dev_err(&ioa_cfg->pdev->dev,
2547                 "Adapter being reset due to command timeout.\n");
2548
2549         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2550                 ioa_cfg->sdt_state = GET_DUMP;
2551
2552         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2553                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2554
2555         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556         LEAVE;
2557 }
2558
2559 /**
2560  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2561  * @ipr_cmd:    ipr command struct
2562  *
2563  * This function blocks host requests and initiates an
2564  * adapter reset.
2565  *
2566  * Return value:
2567  *      none
2568  **/
2569 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2570 {
2571         unsigned long lock_flags = 0;
2572         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2573
2574         ENTER;
2575         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2576
2577         ioa_cfg->errors_logged++;
2578         dev_err(&ioa_cfg->pdev->dev,
2579                 "Adapter timed out transitioning to operational.\n");
2580
2581         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2582                 ioa_cfg->sdt_state = GET_DUMP;
2583
2584         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2585                 if (ipr_fastfail)
2586                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2587                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2588         }
2589
2590         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2591         LEAVE;
2592 }
2593
2594 /**
2595  * ipr_find_ses_entry - Find matching SES in SES table
2596  * @res:        resource entry struct of SES
2597  *
2598  * Return value:
2599  *      pointer to SES table entry / NULL on failure
2600  **/
2601 static const struct ipr_ses_table_entry *
2602 ipr_find_ses_entry(struct ipr_resource_entry *res)
2603 {
2604         int i, j, matches;
2605         struct ipr_std_inq_vpids *vpids;
2606         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2607
2608         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2609                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2610                         if (ste->compare_product_id_byte[j] == 'X') {
2611                                 vpids = &res->std_inq_data.vpids;
2612                                 if (vpids->product_id[j] == ste->product_id[j])
2613                                         matches++;
2614                                 else
2615                                         break;
2616                         } else
2617                                 matches++;
2618                 }
2619
2620                 if (matches == IPR_PROD_ID_LEN)
2621                         return ste;
2622         }
2623
2624         return NULL;
2625 }
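
/*
 * Matching semantics, illustrative only: an 'X' in
 * compare_product_id_byte means "this byte is significant", any other
 * character means "don't care". With a hypothetical mask of
 * "XXXXXXX*XXXXXXXX", byte 7 of the product id is ignored, so two
 * enclosure models differing only in that byte share one table entry.
 */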
2626
2627 /**
2628  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2629  * @ioa_cfg:    ioa config struct
2630  * @bus:                SCSI bus
2631  * @bus_width:  bus width
2632  *
2633  * Return value:
2634  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2635  *      For a 2-byte wide SCSI bus, the maximum transfer speed is
2636  *      twice the maximum transfer rate (e.g. for a wide enabled bus,
2637  *      max 160MHz = max 320MB/sec).
2638  **/
2639 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2640 {
2641         struct ipr_resource_entry *res;
2642         const struct ipr_ses_table_entry *ste;
2643         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2644
2645         /* Loop through each config table entry in the config table buffer */
2646         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2647                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2648                         continue;
2649
2650                 if (bus != res->bus)
2651                         continue;
2652
2653                 if (!(ste = ipr_find_ses_entry(res)))
2654                         continue;
2655
2656                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2657         }
2658
2659         return max_xfer_rate;
2660 }
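
/*
 * Worked example, illustrative only: a SES entry with
 * max_bus_speed_limit = 160 on a 16-bit (2-byte) wide bus yields
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100KHz units this
 * function returns - 160 MB/sec on a wide bus.
 */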
2661
2662 /**
2663  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2664  * @ioa_cfg:            ioa config struct
2665  * @max_delay:          max delay in micro-seconds to wait
2666  *
2667  * Waits for an IODEBUG ACK from the IOA, busy-waiting between polls.
2668  *
2669  * Return value:
2670  *      0 on success / other on failure
2671  **/
2672 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2673 {
2674         volatile u32 pcii_reg;
2675         int delay = 1;
2676
2677         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2678         while (delay < max_delay) {
2679                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2680
2681                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2682                         return 0;
2683
2684                 /* udelay cannot be used if delay is more than a few milliseconds */
2685                 if ((delay / 1000) > MAX_UDELAY_MS)
2686                         mdelay(delay / 1000);
2687                 else
2688                         udelay(delay);
2689
2690                 delay += delay;
2691         }
2692         return -EIO;
2693 }
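
     /*
      * The polling loop above backs off exponentially: the delay doubles
      * on every pass (1, 2, 4, ... usec).  If d is the last delay used,
      * the total time slept is 1 + 2 + ... + d = 2d - 1, so the wait is
      * bounded by roughly twice max_delay before -EIO is returned.
      */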
2694
2695 /**
2696  * ipr_get_sis64_dump_data_section - Dump IOA memory
2697  * @ioa_cfg:                    ioa config struct
2698  * @start_addr:                 adapter address to dump
2699  * @dest:                       destination kernel buffer
2700  * @length_in_words:            length to dump in 4 byte words
2701  *
2702  * Return value:
2703  *      0 on success
2704  **/
2705 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2706                                            u32 start_addr,
2707                                            __be32 *dest, u32 length_in_words)
2708 {
2709         int i;
2710
2711         for (i = 0; i < length_in_words; i++) {
2712                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2713                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2714                 dest++;
2715         }
2716
2717         return 0;
2718 }
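
     /*
      * SIS64 adapters expose dump memory through an indirect register
      * window: each word is fetched by writing the adapter address to
      * dump_addr_reg and reading the data back from dump_data_reg.
      * Reading one 32-bit word at a hypothetical address 0x1000 would
      * look like:
      *
      *   writel(0x1000, ioa_cfg->regs.dump_addr_reg);
      *   data = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
      */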
2719
2720 /**
2721  * ipr_get_ldump_data_section - Dump IOA memory
2722  * @ioa_cfg:                    ioa config struct
2723  * @start_addr:                 adapter address to dump
2724  * @dest:                               destination kernel buffer
2725  * @length_in_words:    length to dump in 4 byte words
2726  *
2727  * Return value:
2728  *      0 on success / -EIO on failure
2729  **/
2730 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2731                                       u32 start_addr,
2732                                       __be32 *dest, u32 length_in_words)
2733 {
2734         volatile u32 temp_pcii_reg;
2735         int i, delay = 0;
2736
2737         if (ioa_cfg->sis64)
2738                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2739                                                        dest, length_in_words);
2740
2741         /* Write IOA interrupt reg starting LDUMP state  */
2742         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2743                ioa_cfg->regs.set_uproc_interrupt_reg32);
2744
2745         /* Wait for IO debug acknowledge */
2746         if (ipr_wait_iodbg_ack(ioa_cfg,
2747                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2748                 dev_err(&ioa_cfg->pdev->dev,
2749                         "IOA dump long data transfer timeout\n");
2750                 return -EIO;
2751         }
2752
2753         /* Signal LDUMP interlocked - clear IO debug ack */
2754         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2755                ioa_cfg->regs.clr_interrupt_reg);
2756
2757         /* Write Mailbox with starting address */
2758         writel(start_addr, ioa_cfg->ioa_mailbox);
2759
2760         /* Signal address valid - clear IOA Reset alert */
2761         writel(IPR_UPROCI_RESET_ALERT,
2762                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2763
2764         for (i = 0; i < length_in_words; i++) {
2765                 /* Wait for IO debug acknowledge */
2766                 if (ipr_wait_iodbg_ack(ioa_cfg,
2767                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2768                         dev_err(&ioa_cfg->pdev->dev,
2769                                 "IOA dump short data transfer timeout\n");
2770                         return -EIO;
2771                 }
2772
2773                 /* Read data from mailbox and increment destination pointer */
2774                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2775                 dest++;
2776
2777                 /* For all but the last word of data, signal data received */
2778                 if (i < (length_in_words - 1)) {
2779                         /* Signal dump data received - Clear IO debug Ack */
2780                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2781                                ioa_cfg->regs.clr_interrupt_reg);
2782                 }
2783         }
2784
2785         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2786         writel(IPR_UPROCI_RESET_ALERT,
2787                ioa_cfg->regs.set_uproc_interrupt_reg32);
2788
2789         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2790                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2791
2792         /* Signal dump data received - Clear IO debug Ack */
2793         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2794                ioa_cfg->regs.clr_interrupt_reg);
2795
2796         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2797         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2798                 temp_pcii_reg =
2799                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2800
2801                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2802                         return 0;
2803
2804                 udelay(10);
2805                 delay += 10;
2806         }
2807
2808         return 0;
2809 }
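
     /*
      * Summary of the fmt2 LDUMP handshake implemented above:
      *
      *   1. Set RESET_ALERT + IO_DEBUG_ALERT to enter the LDUMP state.
      *   2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it (interlock).
      *   3. Write the start address to the mailbox and clear RESET_ALERT
      *      to signal the address is valid.
      *   4. For each word: wait for the ack, read the mailbox, and clear
      *      the ack (except after the final word).
      *   5. Set RESET_ALERT, clear IO_DEBUG_ALERT and the final ack, then
      *      poll until the IOA drops RESET_ALERT to confirm LDUMP exit.
      */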
2810
2811 #ifdef CONFIG_SCSI_IPR_DUMP
2812 /**
2813  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2814  * @ioa_cfg:            ioa config struct
2815  * @pci_address:        adapter address
2816  * @length:                     length of data to copy
2817  *
2818  * Copy data from PCI adapter to kernel buffer.
2819  * Note: length MUST be a 4 byte multiple
2820  * Return value:
2821  *      0 on success / other on failure
2822  **/
2823 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2824                         unsigned long pci_address, u32 length)
2825 {
2826         int bytes_copied = 0;
2827         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2828         __be32 *page;
2829         unsigned long lock_flags = 0;
2830         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2831
2832         if (ioa_cfg->sis64)
2833                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2834         else
2835                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2836
2837         while (bytes_copied < length &&
2838                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2839                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2840                     ioa_dump->page_offset == 0) {
2841                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2842
2843                         if (!page) {
2844                                 ipr_trace;
2845                                 return bytes_copied;
2846                         }
2847
2848                         ioa_dump->page_offset = 0;
2849                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2850                         ioa_dump->next_page_index++;
2851                 } else
2852                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2853
2854                 rem_len = length - bytes_copied;
2855                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2856                 cur_len = min(rem_len, rem_page_len);
2857
2858                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2859                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2860                         rc = -EIO;
2861                 } else {
2862                         rc = ipr_get_ldump_data_section(ioa_cfg,
2863                                                         pci_address + bytes_copied,
2864                                                         &page[ioa_dump->page_offset / 4],
2865                                                         (cur_len / sizeof(u32)));
2866                 }
2867                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2868
2869                 if (!rc) {
2870                         ioa_dump->page_offset += cur_len;
2871                         bytes_copied += cur_len;
2872                 } else {
2873                         ipr_trace;
2874                         break;
2875                 }
2876                 schedule();
2877         }
2878
2879         return bytes_copied;
2880 }
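
     /*
      * Two properties of the copy loop above are worth noting: dump data
      * accumulates in page-sized chunks allocated with GFP_ATOMIC and
      * tracked through next_page_index, and the host lock is held only
      * around each ipr_get_ldump_data_section() call, with schedule()
      * between chunks, so a long dump neither monopolizes the CPU nor
      * delays an ABORT_DUMP request.
      */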
2881
2882 /**
2883  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2884  * @hdr:        dump entry header struct
2885  *
2886  * Return value:
2887  *      nothing
2888  **/
2889 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2890 {
2891         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2892         hdr->num_elems = 1;
2893         hdr->offset = sizeof(*hdr);
2894         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2895 }
2896
2897 /**
2898  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2899  * @ioa_cfg:    ioa config struct
2900  * @driver_dump:        driver dump struct
2901  *
2902  * Return value:
2903  *      nothing
2904  **/
2905 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2906                                    struct ipr_driver_dump *driver_dump)
2907 {
2908         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2909
2910         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2911         driver_dump->ioa_type_entry.hdr.len =
2912                 sizeof(struct ipr_dump_ioa_type_entry) -
2913                 sizeof(struct ipr_dump_entry_header);
2914         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2915         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2916         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2917         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2918                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2919                 ucode_vpd->minor_release[1];
2920         driver_dump->hdr.num_entries++;
2921 }
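
     /*
      * fw_version above packs four VPD bytes into one 32-bit word:
      *
      *   [31:24] major_release    [23:16] card_type
      *   [15:8]  minor_release[0]  [7:0]  minor_release[1]
      *
      * e.g. hypothetical values major 0x02, card 0x10, minor 0x00/0x05
      * give 0x02100005, matching the "%02X%02X%02X%02X" layout used by
      * the fw_version sysfs attribute further below.
      */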
2922
2923 /**
2924  * ipr_dump_version_data - Fill in the driver version in the dump.
2925  * @ioa_cfg:    ioa config struct
2926  * @driver_dump:        driver dump struct
2927  *
2928  * Return value:
2929  *      nothing
2930  **/
2931 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2932                                   struct ipr_driver_dump *driver_dump)
2933 {
2934         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2935         driver_dump->version_entry.hdr.len =
2936                 sizeof(struct ipr_dump_version_entry) -
2937                 sizeof(struct ipr_dump_entry_header);
2938         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2939         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2940         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2941         driver_dump->hdr.num_entries++;
2942 }
2943
2944 /**
2945  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2946  * @ioa_cfg:    ioa config struct
2947  * @driver_dump:        driver dump struct
2948  *
2949  * Return value:
2950  *      nothing
2951  **/
2952 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2953                                    struct ipr_driver_dump *driver_dump)
2954 {
2955         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2956         driver_dump->trace_entry.hdr.len =
2957                 sizeof(struct ipr_dump_trace_entry) -
2958                 sizeof(struct ipr_dump_entry_header);
2959         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2960         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2961         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2962         driver_dump->hdr.num_entries++;
2963 }
2964
2965 /**
2966  * ipr_dump_location_data - Fill in the IOA location in the dump.
2967  * @ioa_cfg:    ioa config struct
2968  * @driver_dump:        driver dump struct
2969  *
2970  * Return value:
2971  *      nothing
2972  **/
2973 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2974                                    struct ipr_driver_dump *driver_dump)
2975 {
2976         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2977         driver_dump->location_entry.hdr.len =
2978                 sizeof(struct ipr_dump_location_entry) -
2979                 sizeof(struct ipr_dump_entry_header);
2980         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2981         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2982         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2983         driver_dump->hdr.num_entries++;
2984 }
2985
2986 /**
2987  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2988  * @ioa_cfg:    ioa config struct
2989  * @dump:               dump struct
2990  *
2991  * Return value:
2992  *      nothing
2993  **/
2994 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2995 {
2996         unsigned long start_addr, sdt_word;
2997         unsigned long lock_flags = 0;
2998         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2999         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3000         u32 num_entries, max_num_entries, start_off, end_off;
3001         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3002         struct ipr_sdt *sdt;
3003         int valid = 1;
3004         int i;
3005
3006         ENTER;
3007
3008         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3009
3010         if (ioa_cfg->sdt_state != READ_DUMP) {
3011                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3012                 return;
3013         }
3014
3015         if (ioa_cfg->sis64) {
3016                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3017                 ssleep(IPR_DUMP_DELAY_SECONDS);
3018                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3019         }
3020
3021         start_addr = readl(ioa_cfg->ioa_mailbox);
3022
3023         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3024                 dev_err(&ioa_cfg->pdev->dev,
3025                         "Invalid dump table format: %lx\n", start_addr);
3026                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3027                 return;
3028         }
3029
3030         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3031
3032         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3033
3034         /* Initialize the overall dump header */
3035         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3036         driver_dump->hdr.num_entries = 1;
3037         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3038         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3039         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3040         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3041
3042         ipr_dump_version_data(ioa_cfg, driver_dump);
3043         ipr_dump_location_data(ioa_cfg, driver_dump);
3044         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3045         ipr_dump_trace_data(ioa_cfg, driver_dump);
3046
3047         /* Update dump_header */
3048         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3049
3050         /* IOA Dump entry */
3051         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3052         ioa_dump->hdr.len = 0;
3053         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3054         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3055
3056         /* The first entries in the sdt are actually a list of dump
3057          * addresses and lengths used to gather the real dump data.  sdt
3058          * points to the IOA-generated dump table; dump data will be
3059          * extracted based on its entries. */
3060         sdt = &ioa_dump->sdt;
3061
3062         if (ioa_cfg->sis64) {
3063                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3064                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3065         } else {
3066                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3067                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3068         }
3069
3070         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3071                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3072         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3073                                         bytes_to_copy / sizeof(__be32));
3074
3075         /* Smart Dump table is ready to use and the first entry is valid */
3076         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3077             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3078                 dev_err(&ioa_cfg->pdev->dev,
3079                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3080                         rc, be32_to_cpu(sdt->hdr.state));
3081                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3082                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3083                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084                 return;
3085         }
3086
3087         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3088
3089         if (num_entries > max_num_entries)
3090                 num_entries = max_num_entries;
3091
3092         /* Update dump length to the actual data to be copied */
3093         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3094         if (ioa_cfg->sis64)
3095                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3096         else
3097                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3098
3099         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3100
3101         for (i = 0; i < num_entries; i++) {
3102                 if (ioa_dump->hdr.len > max_dump_size) {
3103                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3104                         break;
3105                 }
3106
3107                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3108                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3109                         if (ioa_cfg->sis64)
3110                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3111                         else {
3112                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3113                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3114
3115                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3116                                         bytes_to_copy = end_off - start_off;
3117                                 else
3118                                         valid = 0;
3119                         }
3120                         if (valid) {
3121                                 if (bytes_to_copy > max_dump_size) {
3122                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3123                                         continue;
3124                                 }
3125
3126                                 /* Copy data from adapter to driver buffers */
3127                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3128                                                             bytes_to_copy);
3129
3130                                 ioa_dump->hdr.len += bytes_copied;
3131
3132                                 if (bytes_copied != bytes_to_copy) {
3133                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3134                                         break;
3135                                 }
3136                         }
3137                 }
3138         }
3139
3140         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3141
3142         /* Update dump_header */
3143         driver_dump->hdr.len += ioa_dump->hdr.len;
3144         wmb();
3145         ioa_cfg->sdt_state = DUMP_OBTAINED;
3146         LEAVE;
3147 }
3148
3149 #else
3150 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3151 #endif
3152
3153 /**
3154  * ipr_release_dump - Free adapter dump memory
3155  * @kref:       kref struct
3156  *
3157  * Return value:
3158  *      nothing
3159  **/
3160 static void ipr_release_dump(struct kref *kref)
3161 {
3162         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3163         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3164         unsigned long lock_flags = 0;
3165         int i;
3166
3167         ENTER;
3168         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3169         ioa_cfg->dump = NULL;
3170         ioa_cfg->sdt_state = INACTIVE;
3171         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3172
3173         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3174                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3175
3176         vfree(dump->ioa_dump.ioa_data);
3177         kfree(dump);
3178         LEAVE;
3179 }
3180
3181 /**
3182  * ipr_worker_thread - Worker thread
3183  * @work:               work struct
3184  *
3185  * Called at task level from a work thread. This function takes care
3186  * of adding and removing devices from the mid-layer as configuration
3187  * changes are detected by the adapter.
3188  *
3189  * Return value:
3190  *      nothing
3191  **/
3192 static void ipr_worker_thread(struct work_struct *work)
3193 {
3194         unsigned long lock_flags;
3195         struct ipr_resource_entry *res;
3196         struct scsi_device *sdev;
3197         struct ipr_dump *dump;
3198         struct ipr_ioa_cfg *ioa_cfg =
3199                 container_of(work, struct ipr_ioa_cfg, work_q);
3200         u8 bus, target, lun;
3201         int did_work;
3202
3203         ENTER;
3204         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3205
3206         if (ioa_cfg->sdt_state == READ_DUMP) {
3207                 dump = ioa_cfg->dump;
3208                 if (!dump) {
3209                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3210                         return;
3211                 }
3212                 kref_get(&dump->kref);
3213                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214                 ipr_get_ioa_dump(ioa_cfg, dump);
3215                 kref_put(&dump->kref, ipr_release_dump);
3216
3217                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3218                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3219                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3220                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3221                 return;
3222         }
3223
3224 restart:
3225         do {
3226                 did_work = 0;
3227                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3228                     !ioa_cfg->allow_ml_add_del) {
3229                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230                         return;
3231                 }
3232
3233                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3234                         if (res->del_from_ml && res->sdev) {
3235                                 did_work = 1;
3236                                 sdev = res->sdev;
3237                                 if (!scsi_device_get(sdev)) {
3238                                         if (!res->add_to_ml)
3239                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3240                                         else
3241                                                 res->del_from_ml = 0;
3242                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3243                                         scsi_remove_device(sdev);
3244                                         scsi_device_put(sdev);
3245                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3246                                 }
3247                                 break;
3248                         }
3249                 }
3250         } while (did_work);
3251
3252         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3253                 if (res->add_to_ml) {
3254                         bus = res->bus;
3255                         target = res->target;
3256                         lun = res->lun;
3257                         res->add_to_ml = 0;
3258                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3260                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3261                         goto restart;
3262                 }
3263         }
3264
3265         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3266         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3267         LEAVE;
3268 }
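
     /*
      * The loop structure above is the usual pattern for mutating a list
      * when the protecting lock must be dropped: at most one device is
      * removed (or added) per pass, the host lock is released around the
      * mid-layer call, and the scan then restarts from the head of
      * used_res_q, since the list may have changed while unlocked.  A
      * KOBJ_CHANGE uevent is emitted once the queue has been drained.
      */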
3269
3270 #ifdef CONFIG_SCSI_IPR_TRACE
3271 /**
3272  * ipr_read_trace - Dump the adapter trace
3273  * @filp:               open sysfs file
3274  * @kobj:               kobject struct
3275  * @bin_attr:           bin_attribute struct
3276  * @buf:                buffer
3277  * @off:                offset
3278  * @count:              buffer size
3279  *
3280  * Return value:
3281  *      number of bytes printed to buffer
3282  **/
3283 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3284                               struct bin_attribute *bin_attr,
3285                               char *buf, loff_t off, size_t count)
3286 {
3287         struct device *dev = container_of(kobj, struct device, kobj);
3288         struct Scsi_Host *shost = class_to_shost(dev);
3289         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3290         unsigned long lock_flags = 0;
3291         ssize_t ret;
3292
3293         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3294         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3295                                 IPR_TRACE_SIZE);
3296         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297
3298         return ret;
3299 }
3300
3301 static struct bin_attribute ipr_trace_attr = {
3302         .attr = {
3303                 .name = "trace",
3304                 .mode = S_IRUGO,
3305         },
3306         .size = 0,
3307         .read = ipr_read_trace,
3308 };
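
     /*
      * The trace buffer is exported as a read-only binary sysfs
      * attribute and can be captured from user space with, for example:
      *
      *   dd if=/sys/class/scsi_host/host0/trace of=ipr_trace.bin
      *
      * (host0 is a placeholder for the adapter's actual host number).
      */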
3309 #endif
3310
3311 /**
3312  * ipr_show_fw_version - Show the firmware version
3313  * @dev:        class device struct
3314  * @buf:        buffer
3315  *
3316  * Return value:
3317  *      number of bytes printed to buffer
3318  **/
3319 static ssize_t ipr_show_fw_version(struct device *dev,
3320                                    struct device_attribute *attr, char *buf)
3321 {
3322         struct Scsi_Host *shost = class_to_shost(dev);
3323         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3324         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3325         unsigned long lock_flags = 0;
3326         int len;
3327
3328         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3329         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3330                        ucode_vpd->major_release, ucode_vpd->card_type,
3331                        ucode_vpd->minor_release[0],
3332                        ucode_vpd->minor_release[1]);
3333         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3334         return len;
3335 }
3336
3337 static struct device_attribute ipr_fw_version_attr = {
3338         .attr = {
3339                 .name =         "fw_version",
3340                 .mode =         S_IRUGO,
3341         },
3342         .show = ipr_show_fw_version,
3343 };
3344
3345 /**
3346  * ipr_show_log_level - Show the adapter's error logging level
3347  * @dev:        class device struct
3348  * @buf:        buffer
3349  *
3350  * Return value:
3351  *      number of bytes printed to buffer
3352  **/
3353 static ssize_t ipr_show_log_level(struct device *dev,
3354                                    struct device_attribute *attr, char *buf)
3355 {
3356         struct Scsi_Host *shost = class_to_shost(dev);
3357         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3358         unsigned long lock_flags = 0;
3359         int len;
3360
3361         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3362         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3363         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3364         return len;
3365 }
3366
3367 /**
3368  * ipr_store_log_level - Change the adapter's error logging level
3369  * @dev:        class device struct
3370  * @buf:        buffer
3371  *
3372  * Return value:
3373  *      number of bytes printed to buffer
3374  **/
3375 static ssize_t ipr_store_log_level(struct device *dev,
3376                                    struct device_attribute *attr,
3377                                    const char *buf, size_t count)
3378 {
3379         struct Scsi_Host *shost = class_to_shost(dev);
3380         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3381         unsigned long lock_flags = 0;
3382
3383         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3384         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3385         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3386         return strlen(buf);
3387 }
3388
3389 static struct device_attribute ipr_log_level_attr = {
3390         .attr = {
3391                 .name =         "log_level",
3392                 .mode =         S_IRUGO | S_IWUSR,
3393         },
3394         .show = ipr_show_log_level,
3395         .store = ipr_store_log_level
3396 };
3397
3398 /**
3399  * ipr_store_diagnostics - IOA Diagnostics interface
3400  * @dev:        device struct
3401  * @buf:        buffer
3402  * @count:      buffer size
3403  *
3404  * This function will reset the adapter and wait a reasonable
3405  * amount of time for any errors that the adapter might log.
3406  *
3407  * Return value:
3408  *      count on success / other on failure
3409  **/
3410 static ssize_t ipr_store_diagnostics(struct device *dev,
3411                                      struct device_attribute *attr,
3412                                      const char *buf, size_t count)
3413 {
3414         struct Scsi_Host *shost = class_to_shost(dev);
3415         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3416         unsigned long lock_flags = 0;
3417         int rc = count;
3418
3419         if (!capable(CAP_SYS_ADMIN))
3420                 return -EACCES;
3421
3422         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3423         while (ioa_cfg->in_reset_reload) {
3424                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3426                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3427         }
3428
3429         ioa_cfg->errors_logged = 0;
3430         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3431
3432         if (ioa_cfg->in_reset_reload) {
3433                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3434                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3435
3436                 /* Wait for a second for any errors to be logged */
3437                 msleep(1000);
3438         } else {
3439                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440                 return -EIO;
3441         }
3442
3443         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3444         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3445                 rc = -EIO;
3446         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3447
3448         return rc;
3449 }
3450
3451 static struct device_attribute ipr_diagnostics_attr = {
3452         .attr = {
3453                 .name =         "run_diagnostics",
3454                 .mode =         S_IWUSR,
3455         },
3456         .store = ipr_store_diagnostics
3457 };
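
     /*
      * Diagnostics are triggered by any write to the attribute, e.g.:
      *
      *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
      *
      * (host0 is a placeholder).  The write blocks across the adapter
      * reset and fails with -EIO if the reset does not complete cleanly
      * or if the adapter logged errors while resetting.
      */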
3458
3459 /**
3460  * ipr_show_adapter_state - Show the adapter's state
3461  * @dev:        device struct
3462  * @buf:        buffer
3463  *
3464  * Return value:
3465  *      number of bytes printed to buffer
3466  **/
3467 static ssize_t ipr_show_adapter_state(struct device *dev,
3468                                       struct device_attribute *attr, char *buf)
3469 {
3470         struct Scsi_Host *shost = class_to_shost(dev);
3471         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3472         unsigned long lock_flags = 0;
3473         int len;
3474
3475         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3476         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3477                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3478         else
3479                 len = snprintf(buf, PAGE_SIZE, "online\n");
3480         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3481         return len;
3482 }
3483
3484 /**
3485  * ipr_store_adapter_state - Change adapter state
3486  * @dev:        device struct
3487  * @buf:        buffer
3488  * @count:      buffer size
3489  *
3490  * This function will change the adapter's state.
3491  *
3492  * Return value:
3493  *      count on success / other on failure
3494  **/
3495 static ssize_t ipr_store_adapter_state(struct device *dev,
3496                                        struct device_attribute *attr,
3497                                        const char *buf, size_t count)
3498 {
3499         struct Scsi_Host *shost = class_to_shost(dev);
3500         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3501         unsigned long lock_flags;
3502         int result = count, i;
3503
3504         if (!capable(CAP_SYS_ADMIN))
3505                 return -EACCES;
3506
3507         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3508         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3509             !strncmp(buf, "online", 6)) {
3510                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3511                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3512                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3513                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3514                 }
3515                 wmb();
3516                 ioa_cfg->reset_retries = 0;
3517                 ioa_cfg->in_ioa_bringdown = 0;
3518                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3519         }
3520         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3521         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3522
3523         return result;
3524 }
3525
3526 static struct device_attribute ipr_ioa_state_attr = {
3527         .attr = {
3528                 .name =         "online_state",
3529                 .mode =         S_IRUGO | S_IWUSR,
3530         },
3531         .show = ipr_show_adapter_state,
3532         .store = ipr_store_adapter_state
3533 };
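
     /*
      * Bringing a dead adapter back online is thus a sysfs write, e.g.:
      *
      *   echo online > /sys/class/scsi_host/host0/online_state
      *
      * (host0 is a placeholder).  Writes other than "online" to a dead
      * adapter, or any write to a live one, leave the state unchanged.
      */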
3534
3535 /**
3536  * ipr_store_reset_adapter - Reset the adapter
3537  * @dev:        device struct
3538  * @buf:        buffer
3539  * @count:      buffer size
3540  *
3541  * This function will reset the adapter.
3542  *
3543  * Return value:
3544  *      count on success / other on failure
3545  **/
3546 static ssize_t ipr_store_reset_adapter(struct device *dev,
3547                                        struct device_attribute *attr,
3548                                        const char *buf, size_t count)
3549 {
3550         struct Scsi_Host *shost = class_to_shost(dev);
3551         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3552         unsigned long lock_flags;
3553         int result = count;
3554
3555         if (!capable(CAP_SYS_ADMIN))
3556                 return -EACCES;
3557
3558         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3559         if (!ioa_cfg->in_reset_reload)
3560                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3561         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3562         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3563
3564         return result;
3565 }
3566
3567 static struct device_attribute ipr_ioa_reset_attr = {
3568         .attr = {
3569                 .name =         "reset_host",
3570                 .mode =         S_IWUSR,
3571         },
3572         .store = ipr_store_reset_adapter
3573 };
3574
3575 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3576 /**
3577  * ipr_show_iopoll_weight - Show ipr polling mode
3578  * @dev:        class device struct
3579  * @buf:        buffer
3580  *
3581  * Return value:
3582  *      number of bytes printed to buffer
3583  **/
3584 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3585                                    struct device_attribute *attr, char *buf)
3586 {
3587         struct Scsi_Host *shost = class_to_shost(dev);
3588         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3589         unsigned long lock_flags = 0;
3590         int len;
3591
3592         spin_lock_irqsave(shost->host_lock, lock_flags);
3593         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3594         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3595
3596         return len;
3597 }
3598
3599 /**
3600  * ipr_store_iopoll_weight - Change the adapter's polling mode
3601  * @dev:        class device struct
3602  * @buf:        buffer
3603  *
3604  * Return value:
3605  *      number of bytes consumed from the buffer
3606  **/
3607 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3608                                         struct device_attribute *attr,
3609                                         const char *buf, size_t count)
3610 {
3611         struct Scsi_Host *shost = class_to_shost(dev);
3612         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3613         unsigned long user_iopoll_weight;
3614         unsigned long lock_flags = 0;
3615         int i;
3616
3617         if (!ioa_cfg->sis64) {
3618                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3619                 return -EINVAL;
3620         }
3621         if (kstrtoul(buf, 10, &user_iopoll_weight))
3622                 return -EINVAL;
3623
3624         if (user_iopoll_weight > 256) {
3625                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3626                 return -EINVAL;
3627         }
3628
3629         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3630                 dev_info(&ioa_cfg->pdev->dev, "Specified blk-iopoll weight matches the current weight\n");
3631                 return strlen(buf);
3632         }
3633
3634         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3635                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3636                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3637                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3638         }
3639
3640         spin_lock_irqsave(shost->host_lock, lock_flags);
3641         ioa_cfg->iopoll_weight = user_iopoll_weight;
3642         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
3643                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3644                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3645                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3646                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3647                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3648                 }
3649         }
3650         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3651
3652         return strlen(buf);
3653 }
3654
3655 static struct device_attribute ipr_iopoll_weight_attr = {
3656         .attr = {
3657                 .name =         "iopoll_weight",
3658                 .mode =         S_IRUGO | S_IWUSR,
3659         },
3660         .show = ipr_show_iopoll_weight,
3661         .store = ipr_store_iopoll_weight
3662 };
3663
3664 /**
3665  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3666  * @buf_len:            buffer length
3667  *
3668  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3669  * list to use for microcode download
3670  *
3671  * Return value:
3672  *      pointer to sglist / NULL on failure
3673  **/
3674 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3675 {
3676         int sg_size, order, bsize_elem, num_elem, i, j;
3677         struct ipr_sglist *sglist;
3678         struct scatterlist *scatterlist;
3679         struct page *page;
3680
3681         /* Get the minimum size per scatter/gather element */
3682         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3683
3684         /* Get the actual size per element */
3685         order = get_order(sg_size);
3686
3687         /* Determine the actual number of bytes per element */
3688         bsize_elem = PAGE_SIZE * (1 << order);
3689
3690         /* Determine the actual number of sg entries needed */
3691         if (buf_len % bsize_elem)
3692                 num_elem = (buf_len / bsize_elem) + 1;
3693         else
3694                 num_elem = buf_len / bsize_elem;
3695
3696         /* Allocate a scatter/gather list for the DMA */
3697         sglist = kzalloc(sizeof(struct ipr_sglist) +
3698                          (sizeof(struct scatterlist) * (num_elem - 1)),
3699                          GFP_KERNEL);
3700
3701         if (sglist == NULL) {
3702                 ipr_trace;
3703                 return NULL;
3704         }
3705
3706         scatterlist = sglist->scatterlist;
3707         sg_init_table(scatterlist, num_elem);
3708
3709         sglist->order = order;
3710         sglist->num_sg = num_elem;
3711
3712         /* Allocate a bunch of sg elements */
3713         for (i = 0; i < num_elem; i++) {
3714                 page = alloc_pages(GFP_KERNEL, order);
3715                 if (!page) {
3716                         ipr_trace;
3717
3718                         /* Free up what we already allocated */
3719                         for (j = i - 1; j >= 0; j--)
3720                                 __free_pages(sg_page(&scatterlist[j]), order);
3721                         kfree(sglist);
3722                         return NULL;
3723                 }
3724
3725                 sg_set_page(&scatterlist[i], page, 0, 0);
3726         }
3727
3728         return sglist;
3729 }
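
     /*
      * Worked example of the sizing math above, assuming a 4KB PAGE_SIZE,
      * an IPR_MAX_SGLIST of 64, and a hypothetical 1MB image:
      *
      *   sg_size    = 1048576 / 63 = 16644 bytes
      *   order      = get_order(16644) = 3
      *   bsize_elem = 4096 << 3 = 32768 bytes per element
      *   num_elem   = 1048576 / 32768 = 32 entries
      *
      * so the image fits in 32 order-3 page allocations, comfortably
      * under the scatter/gather limit.
      */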
3730
3731 /**
3732  * ipr_free_ucode_buffer - Frees a microcode download buffer
3733  * @sglist:             scatter/gather list pointer
3734  *
3735  * Free a DMA'able ucode download buffer previously allocated with
3736  * ipr_alloc_ucode_buffer
3737  *
3738  * Return value:
3739  *      nothing
3740  **/
3741 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3742 {
3743         int i;
3744
3745         for (i = 0; i < sglist->num_sg; i++)
3746                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3747
3748         kfree(sglist);
3749 }
3750
3751 /**
3752  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3753  * @sglist:             scatter/gather list pointer
3754  * @buffer:             buffer pointer
3755  * @len:                buffer length
3756  *
3757  * Copy a microcode image from a user buffer into a buffer allocated by
3758  * ipr_alloc_ucode_buffer
3759  *
3760  * Return value:
3761  *      0 on success / other on failure
3762  **/
3763 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3764                                  u8 *buffer, u32 len)
3765 {
3766         int bsize_elem, i, result = 0;
3767         struct scatterlist *scatterlist;
3768         void *kaddr;
3769
3770         /* Determine the actual number of bytes per element */
3771         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3772
3773         scatterlist = sglist->scatterlist;
3774
3775         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3776                 struct page *page = sg_page(&scatterlist[i]);
3777
3778                 kaddr = kmap(page);
3779                 memcpy(kaddr, buffer, bsize_elem);
3780                 kunmap(page);
3781
3782                 scatterlist[i].length = bsize_elem;
3783
3784                 if (result != 0) {
3785                         ipr_trace;
3786                         return result;
3787                 }
3788         }
3789
3790         if (len % bsize_elem) {
3791                 struct page *page = sg_page(&scatterlist[i]);
3792
3793                 kaddr = kmap(page);
3794                 memcpy(kaddr, buffer, len % bsize_elem);
3795                 kunmap(page);
3796
3797                 scatterlist[i].length = len % bsize_elem;
3798         }
3799
3800         sglist->buffer_len = len;
3801         return result;
3802 }
3803
3804 /**
3805  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3806  * @ipr_cmd:            ipr command struct
3807  * @sglist:             scatter/gather list
3808  *
3809  * Builds a microcode download IOA data list (IOADL).
3810  *
3811  **/
3812 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3813                                     struct ipr_sglist *sglist)
3814 {
3815         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3816         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3817         struct scatterlist *scatterlist = sglist->scatterlist;
3818         int i;
3819
3820         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3821         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3822         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3823
3824         ioarcb->ioadl_len =
3825                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3826         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3827                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3828                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3829                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3830         }
3831
3832         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3833 }
3834
3835 /**
3836  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3837  * @ipr_cmd:    ipr command struct
3838  * @sglist:             scatter/gather list
3839  *
3840  * Builds a microcode download IOA data list (IOADL).
3841  *
3842  **/
3843 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3844                                   struct ipr_sglist *sglist)
3845 {
3846         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3847         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3848         struct scatterlist *scatterlist = sglist->scatterlist;
3849         int i;
3850
3851         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3852         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3853         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3854
3855         ioarcb->ioadl_len =
3856                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3857
3858         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3859                 ioadl[i].flags_and_data_len =
3860                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3861                 ioadl[i].address =
3862                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3863         }
3864
3865         ioadl[i-1].flags_and_data_len |=
3866                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3867 }
3868
3869 /**
3870  * ipr_update_ioa_ucode - Update IOA's microcode
3871  * @ioa_cfg:    ioa config struct
3872  * @sglist:             scatter/gather list
3873  *
3874  * Initiate an adapter reset to update the IOA's microcode
3875  *
3876  * Return value:
3877  *      0 on success / -EIO on failure
3878  **/
3879 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3880                                 struct ipr_sglist *sglist)
3881 {
3882         unsigned long lock_flags;
3883
3884         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3885         while (ioa_cfg->in_reset_reload) {
3886                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3887                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3888                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3889         }
3890
3891         if (ioa_cfg->ucode_sglist) {
3892                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3893                 dev_err(&ioa_cfg->pdev->dev,
3894                         "Microcode download already in progress\n");
3895                 return -EIO;
3896         }
3897
3898         sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3899                                         sglist->num_sg, DMA_TO_DEVICE);
3900
3901         if (!sglist->num_dma_sg) {
3902                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3903                 dev_err(&ioa_cfg->pdev->dev,
3904                         "Failed to map microcode download buffer!\n");
3905                 return -EIO;
3906         }
3907
3908         ioa_cfg->ucode_sglist = sglist;
3909         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3910         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3911         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3912
3913         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3914         ioa_cfg->ucode_sglist = NULL;
3915         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3916         return 0;
3917 }
3918
3919 /**
3920  * ipr_store_update_fw - Update the firmware on the adapter
3921  * @dev:        device struct
3922  * @buf:        buffer
3923  * @count:      buffer size
3924  *
3925  * This function will update the firmware on the adapter.
3926  *
3927  * Return value:
3928  *      count on success / other on failure
3929  **/
3930 static ssize_t ipr_store_update_fw(struct device *dev,
3931                                    struct device_attribute *attr,
3932                                    const char *buf, size_t count)
3933 {
3934         struct Scsi_Host *shost = class_to_shost(dev);
3935         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3936         struct ipr_ucode_image_header *image_hdr;
3937         const struct firmware *fw_entry;
3938         struct ipr_sglist *sglist;
3939         char fname[100];
3940         u8 *src;
3941         int len, result, dnld_size;
3942
3943         if (!capable(CAP_SYS_ADMIN))
3944                 return -EACCES;
3945
3946         len = snprintf(fname, sizeof(fname), "%s", buf);
3947         if (len <= 0 || len >= sizeof(fname))
                     return -EINVAL;
             if (fname[len - 1] == '\n')
                     fname[len - 1] = '\0';
3948
3949         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3950                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3951                 return -EIO;
3952         }
3953
3954         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3955
3956         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3957         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3958         sglist = ipr_alloc_ucode_buffer(dnld_size);
3959
3960         if (!sglist) {
3961                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3962                 release_firmware(fw_entry);
3963                 return -ENOMEM;
3964         }
3965
3966         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3967
3968         if (result) {
3969                 dev_err(&ioa_cfg->pdev->dev,
3970                         "Microcode buffer copy to DMA buffer failed\n");
3971                 goto out;
3972         }
3973
3974         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
3975
3976         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3977
3978         if (!result)
3979                 result = count;
3980 out:
3981         ipr_free_ucode_buffer(sglist);
3982         release_firmware(fw_entry);
3983         return result;
3984 }
3985
3986 static struct device_attribute ipr_update_fw_attr = {
3987         .attr = {
3988                 .name =         "update_fw",
3989                 .mode =         S_IWUSR,
3990         },
3991         .store = ipr_store_update_fw
3992 };
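
     /*
      * A firmware update is driven entirely from user space, e.g.:
      *
      *   echo ibm-ucode.img > /sys/class/scsi_host/host0/update_fw
      *
      * (both names are placeholders).  The file is located by
      * request_firmware(), normally under /lib/firmware, and the adapter
      * is then reset to activate the new image.
      */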
3993
3994 /**
3995  * ipr_show_fw_type - Show the adapter's firmware type.
3996  * @dev:        class device struct
3997  * @buf:        buffer
3998  *
3999  * Return value:
4000  *      number of bytes printed to buffer
4001  **/
4002 static ssize_t ipr_show_fw_type(struct device *dev,
4003                                 struct device_attribute *attr, char *buf)
4004 {
4005         struct Scsi_Host *shost = class_to_shost(dev);
4006         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4007         unsigned long lock_flags = 0;
4008         int len;
4009
4010         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4011         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4012         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4013         return len;
4014 }
4015
4016 static struct device_attribute ipr_ioa_fw_type_attr = {
4017         .attr = {
4018                 .name =         "fw_type",
4019                 .mode =         S_IRUGO,
4020         },
4021         .show = ipr_show_fw_type
4022 };
4023
4024 static struct device_attribute *ipr_ioa_attrs[] = {
4025         &ipr_fw_version_attr,
4026         &ipr_log_level_attr,
4027         &ipr_diagnostics_attr,
4028         &ipr_ioa_state_attr,
4029         &ipr_ioa_reset_attr,
4030         &ipr_update_fw_attr,
4031         &ipr_ioa_fw_type_attr,
4032         &ipr_iopoll_weight_attr,
4033         NULL,
4034 };
4035
4036 #ifdef CONFIG_SCSI_IPR_DUMP
4037 /**
4038  * ipr_read_dump - Dump the adapter
4039  * @filp:               open sysfs file
4040  * @kobj:               kobject struct
4041  * @bin_attr:           bin_attribute struct
4042  * @buf:                buffer
4043  * @off:                offset
4044  * @count:              buffer size
4045  *
4046  * Return value:
4047  *      number of bytes printed to buffer
4048  **/
4049 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4050                              struct bin_attribute *bin_attr,
4051                              char *buf, loff_t off, size_t count)
4052 {
4053         struct device *cdev = container_of(kobj, struct device, kobj);
4054         struct Scsi_Host *shost = class_to_shost(cdev);
4055         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4056         struct ipr_dump *dump;
4057         unsigned long lock_flags = 0;
4058         char *src;
4059         int len, sdt_end;
4060         size_t rc = count;
4061
4062         if (!capable(CAP_SYS_ADMIN))
4063                 return -EACCES;
4064
4065         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066         dump = ioa_cfg->dump;
4067
4068         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4069                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4070                 return 0;
4071         }
4072         kref_get(&dump->kref);
4073         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4074
4075         if (off > dump->driver_dump.hdr.len) {
4076                 kref_put(&dump->kref, ipr_release_dump);
4077                 return 0;
4078         }
4079
4080         if (off + count > dump->driver_dump.hdr.len) {
4081                 count = dump->driver_dump.hdr.len - off;
4082                 rc = count;
4083         }
4084
4085         if (count && off < sizeof(dump->driver_dump)) {
4086                 if (off + count > sizeof(dump->driver_dump))
4087                         len = sizeof(dump->driver_dump) - off;
4088                 else
4089                         len = count;
4090                 src = (u8 *)&dump->driver_dump + off;
4091                 memcpy(buf, src, len);
4092                 buf += len;
4093                 off += len;
4094                 count -= len;
4095         }
4096
4097         off -= sizeof(dump->driver_dump);
4098
4099         if (ioa_cfg->sis64)
4100                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4101                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4102                            sizeof(struct ipr_sdt_entry));
4103         else
4104                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4105                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4106
4107         if (count && off < sdt_end) {
4108                 if (off + count > sdt_end)
4109                         len = sdt_end - off;
4110                 else
4111                         len = count;
4112                 src = (u8 *)&dump->ioa_dump + off;
4113                 memcpy(buf, src, len);
4114                 buf += len;
4115                 off += len;
4116                 count -= len;
4117         }
4118
4119         off -= sdt_end;
4120
4121         while (count) {
4122                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4123                         len = PAGE_ALIGN(off) - off;
4124                 else
4125                         len = count;
4126                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4127                 src += off & ~PAGE_MASK;
4128                 memcpy(buf, src, len);
4129                 buf += len;
4130                 off += len;
4131                 count -= len;
4132         }
4133
4134         kref_put(&dump->kref, ipr_release_dump);
4135         return rc;
4136 }
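/*
 * Worked example of the windowing above (sizes illustrative): the dump
 * is presented as three concatenated regions -- the driver dump header,
 * the SDT, then the paged IOA data.  With sizeof(dump->driver_dump) =
 * 1024 and sdt_end = 4096, a read of count = 64 at off = 1008 copies
 * the last 16 bytes of the driver dump, rebases off to 0 against
 * &dump->ioa_dump, and copies the first 48 bytes of the SDT; a read
 * starting beyond sdt_end falls straight through to the page loop.
 */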
4137
4138 /**
4139  * ipr_alloc_dump - Prepare for adapter dump
4140  * @ioa_cfg:    ioa config struct
4141  *
4142  * Return value:
4143  *      0 on success / other on failure
4144  **/
4145 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4146 {
4147         struct ipr_dump *dump;
4148         __be32 **ioa_data;
4149         unsigned long lock_flags = 0;
4150
4151         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4152
4153         if (!dump) {
4154                 ipr_err("Dump memory allocation failed\n");
4155                 return -ENOMEM;
4156         }
4157
4158         if (ioa_cfg->sis64)
4159                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4160         else
4161                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4162
4163         if (!ioa_data) {
4164                 ipr_err("Dump memory allocation failed\n");
4165                 kfree(dump);
4166                 return -ENOMEM;
4167         }
4168
4169         dump->ioa_dump.ioa_data = ioa_data;
4170
4171         kref_init(&dump->kref);
4172         dump->ioa_cfg = ioa_cfg;
4173
4174         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4175
4176         if (INACTIVE != ioa_cfg->sdt_state) {
4177                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4178                 vfree(dump->ioa_dump.ioa_data);
4179                 kfree(dump);
4180                 return 0;
4181         }
4182
4183         ioa_cfg->dump = dump;
4184         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4185         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4186                 ioa_cfg->dump_taken = 1;
4187                 schedule_work(&ioa_cfg->work_q);
4188         }
4189         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4190
4191         return 0;
4192 }
4193
4194 /**
4195  * ipr_free_dump - Free adapter dump memory
4196  * @ioa_cfg:    ioa config struct
4197  *
4198  * Return value:
4199  *      0 on success / other on failure
4200  **/
4201 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4202 {
4203         struct ipr_dump *dump;
4204         unsigned long lock_flags = 0;
4205
4206         ENTER;
4207
4208         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4209         dump = ioa_cfg->dump;
4210         if (!dump) {
4211                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4212                 return 0;
4213         }
4214
4215         ioa_cfg->dump = NULL;
4216         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4217
4218         kref_put(&dump->kref, ipr_release_dump);
4219
4220         LEAVE;
4221         return 0;
4222 }
4223
4224 /**
4225  * ipr_write_dump - Setup dump state of adapter
4226  * @filp:               open sysfs file
4227  * @kobj:               kobject struct
4228  * @bin_attr:           bin_attribute struct
4229  * @buf:                buffer
4230  * @off:                offset
4231  * @count:              buffer size
4232  *
4233  * Return value:
4234  *      number of bytes written on success / negative errno on failure
4235  **/
4236 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4237                               struct bin_attribute *bin_attr,
4238                               char *buf, loff_t off, size_t count)
4239 {
4240         struct device *cdev = container_of(kobj, struct device, kobj);
4241         struct Scsi_Host *shost = class_to_shost(cdev);
4242         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4243         int rc;
4244
4245         if (!capable(CAP_SYS_ADMIN))
4246                 return -EACCES;
4247
4248         if (buf[0] == '1')
4249                 rc = ipr_alloc_dump(ioa_cfg);
4250         else if (buf[0] == '0')
4251                 rc = ipr_free_dump(ioa_cfg);
4252         else
4253                 return -EINVAL;
4254
4255         if (rc)
4256                 return rc;
4257         else
4258                 return count;
4259 }
4260
4261 static struct bin_attribute ipr_dump_attr = {
4262         .attr = {
4263                 .name = "dump",
4264                 .mode = S_IRUSR | S_IWUSR,
4265         },
4266         .size = 0,
4267         .read = ipr_read_dump,
4268         .write = ipr_write_dump
4269 };
4270 #else
4271 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4272 #endif
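/*
 * Usage sketch for the "dump" binary attribute (illustrative only, not
 * driver code; the host number is an assumption).  Writing '1' arms the
 * dump via ipr_alloc_dump(), reads return data once ipr_read_dump()
 * sees sdt_state == DUMP_OBTAINED (a real tool would poll for that),
 * and writing '0' releases the buffers via ipr_free_dump():
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char page[4096];
 *		ssize_t n;
 *		int fd = open("/sys/class/scsi_host/host0/dump", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "1", 1);			// arm the dump
 *		lseek(fd, 0, SEEK_SET);
 *		while ((n = read(fd, page, sizeof(page))) > 0)
 *			write(STDOUT_FILENO, page, n);	// stream dump to stdout
 *		lseek(fd, 0, SEEK_SET);
 *		write(fd, "0", 1);			// free the dump buffers
 *		close(fd);
 *		return 0;
 *	}
 */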
4273
4274 /**
4275  * ipr_change_queue_depth - Change the device's queue depth
4276  * @sdev:       scsi device struct
4277  * @qdepth:     depth to set
4278  * @reason:     calling context
4279  *
4280  * Return value:
4281  *      actual depth set
4282  **/
4283 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4284                                   int reason)
4285 {
4286         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4287         struct ipr_resource_entry *res;
4288         unsigned long lock_flags = 0;
4289
4290         if (reason != SCSI_QDEPTH_DEFAULT)
4291                 return -EOPNOTSUPP;
4292
4293         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4294         res = (struct ipr_resource_entry *)sdev->hostdata;
4295
4296         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4297                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4298         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4299
4300         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4301         return sdev->queue_depth;
4302 }
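/*
 * The midlayer calls ipr_change_queue_depth() when userspace writes the
 * standard per-device queue_depth attribute.  A minimal sketch (device
 * address and path are illustrative):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/scsi_device/0:0:1:0/device/queue_depth", "w");
 *
 *		if (!f)
 *			return 1;
 *		fputs("16", f);		// clamped to IPR_MAX_CMD_PER_ATA_LUN for GATA devices
 *		return fclose(f) ? 1 : 0;
 *	}
 */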
4303
4304 /**
4305  * ipr_change_queue_type - Change the device's queue type
4306  * @sdev:               scsi device struct
4307  * @tag_type:   type of tags to use
4308  *
4309  * Return value:
4310  *      actual queue type set
4311  **/
4312 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4313 {
4314         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4315         struct ipr_resource_entry *res;
4316         unsigned long lock_flags = 0;
4317
4318         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4319         res = (struct ipr_resource_entry *)sdev->hostdata;
4320
4321         if (res) {
4322                 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4323                         /*
4324                          * We don't bother quiescing the device here since the
4325                          * adapter firmware does it for us.
4326                          */
4327                         scsi_set_tag_type(sdev, tag_type);
4328
4329                         if (tag_type)
4330                                 scsi_activate_tcq(sdev, sdev->queue_depth);
4331                         else
4332                                 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4333                 } else
4334                         tag_type = 0;
4335         } else
4336                 tag_type = 0;
4337
4338         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4339         return tag_type;
4340 }
4341
4342 /**
4343  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4344  * @dev:        device struct
4345  * @attr:       device attribute structure
4346  * @buf:        buffer
4347  *
4348  * Return value:
4349  *      number of bytes printed to buffer
4350  **/
4351 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4352 {
4353         struct scsi_device *sdev = to_scsi_device(dev);
4354         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4355         struct ipr_resource_entry *res;
4356         unsigned long lock_flags = 0;
4357         ssize_t len = -ENXIO;
4358
4359         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4360         res = (struct ipr_resource_entry *)sdev->hostdata;
4361         if (res)
4362                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4363         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4364         return len;
4365 }
4366
4367 static struct device_attribute ipr_adapter_handle_attr = {
4368         .attr = {
4369                 .name =         "adapter_handle",
4370                 .mode =         S_IRUSR,
4371         },
4372         .show = ipr_show_adapter_handle
4373 };
4374
4375 /**
4376  * ipr_show_resource_path - Show the resource path or the resource address for
4377  *                          this device.
4378  * @dev:        device struct
4379  * @attr:       device attribute structure
4380  * @buf:        buffer
4381  *
4382  * Return value:
4383  *      number of bytes printed to buffer
4384  **/
4385 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4386 {
4387         struct scsi_device *sdev = to_scsi_device(dev);
4388         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4389         struct ipr_resource_entry *res;
4390         unsigned long lock_flags = 0;
4391         ssize_t len = -ENXIO;
4392         char buffer[IPR_MAX_RES_PATH_LENGTH];
4393
4394         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4395         res = (struct ipr_resource_entry *)sdev->hostdata;
4396         if (res && ioa_cfg->sis64)
4397                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4398                                __ipr_format_res_path(res->res_path, buffer,
4399                                                      sizeof(buffer)));
4400         else if (res)
4401                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4402                                res->bus, res->target, res->lun);
4403
4404         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4405         return len;
4406 }
4407
4408 static struct device_attribute ipr_resource_path_attr = {
4409         .attr = {
4410                 .name =         "resource_path",
4411                 .mode =         S_IRUGO,
4412         },
4413         .show = ipr_show_resource_path
4414 };
4415
4416 /**
4417  * ipr_show_device_id - Show the device_id for this device.
4418  * @dev:        device struct
4419  * @attr:       device attribute structure
4420  * @buf:        buffer
4421  *
4422  * Return value:
4423  *      number of bytes printed to buffer
4424  **/
4425 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4426 {
4427         struct scsi_device *sdev = to_scsi_device(dev);
4428         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4429         struct ipr_resource_entry *res;
4430         unsigned long lock_flags = 0;
4431         ssize_t len = -ENXIO;
4432
4433         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4434         res = (struct ipr_resource_entry *)sdev->hostdata;
4435         if (res && ioa_cfg->sis64)
4436                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4437         else if (res)
4438                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4439
4440         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4441         return len;
4442 }
4443
4444 static struct device_attribute ipr_device_id_attr = {
4445         .attr = {
4446                 .name =         "device_id",
4447                 .mode =         S_IRUGO,
4448         },
4449         .show = ipr_show_device_id
4450 };
4451
4452 /**
4453  * ipr_show_resource_type - Show the resource type for this device.
4454  * @dev:        device struct
4455  * @attr:       device attribute structure
4456  * @buf:        buffer
4457  *
4458  * Return value:
4459  *      number of bytes printed to buffer
4460  **/
4461 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4462 {
4463         struct scsi_device *sdev = to_scsi_device(dev);
4464         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4465         struct ipr_resource_entry *res;
4466         unsigned long lock_flags = 0;
4467         ssize_t len = -ENXIO;
4468
4469         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4470         res = (struct ipr_resource_entry *)sdev->hostdata;
4471
4472         if (res)
4473                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4474
4475         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4476         return len;
4477 }
4478
4479 static struct device_attribute ipr_resource_type_attr = {
4480         .attr = {
4481                 .name =         "resource_type",
4482                 .mode =         S_IRUGO,
4483         },
4484         .show = ipr_show_resource_type
4485 };
4486
4487 static struct device_attribute *ipr_dev_attrs[] = {
4488         &ipr_adapter_handle_attr,
4489         &ipr_resource_path_attr,
4490         &ipr_device_id_attr,
4491         &ipr_resource_type_attr,
4492         NULL,
4493 };
4494
4495 /**
4496  * ipr_biosparam - Return the HSC mapping
4497  * @sdev:                       scsi device struct
4498  * @block_device:       block device pointer
4499  * @capacity:           capacity of the device
4500  * @parm:                       Array containing returned HSC values.
4501  *
4502  * This function generates the HSC parms that fdisk uses.
4503  * We want to make sure we return something that places partitions
4504  * on 4k boundaries for best performance with the IOA.
4505  *
4506  * Return value:
4507  *      0 on success
4508  **/
4509 static int ipr_biosparam(struct scsi_device *sdev,
4510                          struct block_device *block_device,
4511                          sector_t capacity, int *parm)
4512 {
4513         int heads, sectors;
4514         sector_t cylinders;
4515
4516         heads = 128;
4517         sectors = 32;
4518
4519         cylinders = capacity;
4520         sector_div(cylinders, (128 * 32));
4521
4522         /* return result */
4523         parm[0] = heads;
4524         parm[1] = sectors;
4525         parm[2] = cylinders;
4526
4527         return 0;
4528 }
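/*
 * Worked example of the geometry above: 128 heads * 32 sectors/track =
 * 4096 sectors per cylinder, i.e. 2 MiB of 512-byte blocks, so any
 * cylinder-aligned partition is also 4k-aligned.  For a 73 GB disk of
 * 143374744 sectors:
 *
 *	cylinders = 143374744 / 4096 = 35003	(remainder discarded)
 *
 * and the reported C/H/S geometry is 35003/128/32.
 */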
4529
4530 /**
4531  * ipr_find_starget - Find target based on bus/target.
4532  * @starget:    scsi target struct
4533  *
4534  * Return value:
4535  *      resource entry pointer if found / NULL if not found
4536  **/
4537 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4538 {
4539         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4540         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4541         struct ipr_resource_entry *res;
4542
4543         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4544                 if ((res->bus == starget->channel) &&
4545                     (res->target == starget->id)) {
4546                         return res;
4547                 }
4548         }
4549
4550         return NULL;
4551 }
4552
4553 static struct ata_port_info sata_port_info;
4554
4555 /**
4556  * ipr_target_alloc - Prepare for commands to a SCSI target
4557  * @starget:    scsi target struct
4558  *
4559  * If the device is a SATA device, this function allocates an
4560  * ATA port with libata, else it does nothing.
4561  *
4562  * Return value:
4563  *      0 on success / non-0 on failure
4564  **/
4565 static int ipr_target_alloc(struct scsi_target *starget)
4566 {
4567         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4568         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4569         struct ipr_sata_port *sata_port;
4570         struct ata_port *ap;
4571         struct ipr_resource_entry *res;
4572         unsigned long lock_flags;
4573
4574         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4575         res = ipr_find_starget(starget);
4576         starget->hostdata = NULL;
4577
4578         if (res && ipr_is_gata(res)) {
4579                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4580                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4581                 if (!sata_port)
4582                         return -ENOMEM;
4583
4584                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4585                 if (ap) {
4586                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587                         sata_port->ioa_cfg = ioa_cfg;
4588                         sata_port->ap = ap;
4589                         sata_port->res = res;
4590
4591                         res->sata_port = sata_port;
4592                         ap->private_data = sata_port;
4593                         starget->hostdata = sata_port;
4594                 } else {
4595                         kfree(sata_port);
4596                         return -ENOMEM;
4597                 }
4598         }
4599         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4600
4601         return 0;
4602 }
4603
4604 /**
4605  * ipr_target_destroy - Destroy a SCSI target
4606  * @starget:    scsi target struct
4607  *
4608  * If the device was a SATA device, this function frees the libata
4609  * ATA port, else it does nothing.
4610  *
4611  **/
4612 static void ipr_target_destroy(struct scsi_target *starget)
4613 {
4614         struct ipr_sata_port *sata_port = starget->hostdata;
4615         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4616         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4617
4618         if (ioa_cfg->sis64) {
4619                 if (!ipr_find_starget(starget)) {
4620                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4621                                 clear_bit(starget->id, ioa_cfg->array_ids);
4622                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4623                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4624                         else if (starget->channel == 0)
4625                                 clear_bit(starget->id, ioa_cfg->target_ids);
4626                 }
4627         }
4628
4629         if (sata_port) {
4630                 starget->hostdata = NULL;
4631                 ata_sas_port_destroy(sata_port->ap);
4632                 kfree(sata_port);
4633         }
4634 }
4635
4636 /**
4637  * ipr_find_sdev - Find device based on bus/target/lun.
4638  * @sdev:       scsi device struct
4639  *
4640  * Return value:
4641  *      resource entry pointer if found / NULL if not found
4642  **/
4643 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4644 {
4645         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4646         struct ipr_resource_entry *res;
4647
4648         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4649                 if ((res->bus == sdev->channel) &&
4650                     (res->target == sdev->id) &&
4651                     (res->lun == sdev->lun))
4652                         return res;
4653         }
4654
4655         return NULL;
4656 }
4657
4658 /**
4659  * ipr_slave_destroy - Unconfigure a SCSI device
4660  * @sdev:       scsi device struct
4661  *
4662  * Return value:
4663  *      nothing
4664  **/
4665 static void ipr_slave_destroy(struct scsi_device *sdev)
4666 {
4667         struct ipr_resource_entry *res;
4668         struct ipr_ioa_cfg *ioa_cfg;
4669         unsigned long lock_flags = 0;
4670
4671         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4672
4673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4674         res = (struct ipr_resource_entry *) sdev->hostdata;
4675         if (res) {
4676                 if (res->sata_port)
4677                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4678                 sdev->hostdata = NULL;
4679                 res->sdev = NULL;
4680                 res->sata_port = NULL;
4681         }
4682         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4683 }
4684
4685 /**
4686  * ipr_slave_configure - Configure a SCSI device
4687  * @sdev:       scsi device struct
4688  *
4689  * This function configures the specified scsi device.
4690  *
4691  * Return value:
4692  *      0 on success
4693  **/
4694 static int ipr_slave_configure(struct scsi_device *sdev)
4695 {
4696         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4697         struct ipr_resource_entry *res;
4698         struct ata_port *ap = NULL;
4699         unsigned long lock_flags = 0;
4700         char buffer[IPR_MAX_RES_PATH_LENGTH];
4701
4702         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4703         res = sdev->hostdata;
4704         if (res) {
4705                 if (ipr_is_af_dasd_device(res))
4706                         sdev->type = TYPE_RAID;
4707                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4708                         sdev->scsi_level = 4;
4709                         sdev->no_uld_attach = 1;
4710                 }
4711                 if (ipr_is_vset_device(res)) {
4712                         blk_queue_rq_timeout(sdev->request_queue,
4713                                              IPR_VSET_RW_TIMEOUT);
4714                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4715                 }
4716                 if (ipr_is_gata(res) && res->sata_port)
4717                         ap = res->sata_port->ap;
4718                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4719
4720                 if (ap) {
4721                         scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4722                         ata_sas_slave_configure(sdev, ap);
4723                 } else
4724                         scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4725                 if (ioa_cfg->sis64)
4726                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4727                                     ipr_format_res_path(ioa_cfg,
4728                                 res->res_path, buffer, sizeof(buffer)));
4729                 return 0;
4730         }
4731         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4732         return 0;
4733 }
4734
4735 /**
4736  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4737  * @sdev:       scsi device struct
4738  *
4739  * This function initializes an ATA port so that future commands
4740  * sent through queuecommand will work.
4741  *
4742  * Return value:
4743  *      0 on success
4744  **/
4745 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4746 {
4747         struct ipr_sata_port *sata_port = NULL;
4748         int rc = -ENXIO;
4749
4750         ENTER;
4751         if (sdev->sdev_target)
4752                 sata_port = sdev->sdev_target->hostdata;
4753         if (sata_port) {
4754                 rc = ata_sas_port_init(sata_port->ap);
4755                 if (rc == 0)
4756                         rc = ata_sas_sync_probe(sata_port->ap);
4757         }
4758
4759         if (rc)
4760                 ipr_slave_destroy(sdev);
4761
4762         LEAVE;
4763         return rc;
4764 }
4765
4766 /**
4767  * ipr_slave_alloc - Prepare for commands to a device.
4768  * @sdev:       scsi device struct
4769  *
4770  * This function saves a pointer to the resource entry
4771  * in the scsi device struct if the device exists. We
4772  * can then use this pointer in ipr_queuecommand when
4773  * handling new commands.
4774  *
4775  * Return value:
4776  *      0 on success / -ENXIO if device does not exist
4777  **/
4778 static int ipr_slave_alloc(struct scsi_device *sdev)
4779 {
4780         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4781         struct ipr_resource_entry *res;
4782         unsigned long lock_flags;
4783         int rc = -ENXIO;
4784
4785         sdev->hostdata = NULL;
4786
4787         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4788
4789         res = ipr_find_sdev(sdev);
4790         if (res) {
4791                 res->sdev = sdev;
4792                 res->add_to_ml = 0;
4793                 res->in_erp = 0;
4794                 sdev->hostdata = res;
4795                 if (!ipr_is_naca_model(res))
4796                         res->needs_sync_complete = 1;
4797                 rc = 0;
4798                 if (ipr_is_gata(res)) {
4799                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4800                         return ipr_ata_slave_alloc(sdev);
4801                 }
4802         }
4803
4804         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805
4806         return rc;
4807 }
4808
4809 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4810 {
4811         struct ipr_ioa_cfg *ioa_cfg;
4812         unsigned long lock_flags = 0;
4813         int rc = SUCCESS;
4814
4815         ENTER;
4816         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4817         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4818
4819         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4820                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4821                 dev_err(&ioa_cfg->pdev->dev,
4822                         "Adapter being reset as a result of error recovery.\n");
4823
4824                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4825                         ioa_cfg->sdt_state = GET_DUMP;
4826         }
4827
4828         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4829         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4830         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4831
4832         /* If we got hit with a host reset while we were already resetting
4833          * the adapter for some reason and that reset failed, fail this one too. */
4834         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4835                 ipr_trace;
4836                 rc = FAILED;
4837         }
4838
4839         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4840         LEAVE;
4841         return rc;
4842 }
4843
4844 /**
4845  * ipr_device_reset - Reset the device
4846  * @ioa_cfg:    ioa config struct
4847  * @res:                resource entry struct
4848  *
4849  * This function issues a device reset to the affected device.
4850  * If the device is a SCSI device, a LUN reset will be sent
4851  * to the device first. If that does not work, a target reset
4852  * will be sent. If the device is a SATA device, a PHY reset will
4853  * be sent.
4854  *
4855  * Return value:
4856  *      0 on success / non-zero on failure
4857  **/
4858 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4859                             struct ipr_resource_entry *res)
4860 {
4861         struct ipr_cmnd *ipr_cmd;
4862         struct ipr_ioarcb *ioarcb;
4863         struct ipr_cmd_pkt *cmd_pkt;
4864         struct ipr_ioarcb_ata_regs *regs;
4865         u32 ioasc;
4866
4867         ENTER;
4868         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4869         ioarcb = &ipr_cmd->ioarcb;
4870         cmd_pkt = &ioarcb->cmd_pkt;
4871
4872         if (ipr_cmd->ioa_cfg->sis64) {
4873                 regs = &ipr_cmd->i.ata_ioadl.regs;
4874                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4875         } else
4876                 regs = &ioarcb->u.add_data.u.regs;
4877
4878         ioarcb->res_handle = res->res_handle;
4879         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4880         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4881         if (ipr_is_gata(res)) {
4882                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4883                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4884                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4885         }
4886
4887         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4888         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4889         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4890         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4891                 if (ipr_cmd->ioa_cfg->sis64)
4892                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4893                                sizeof(struct ipr_ioasa_gata));
4894                 else
4895                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4896                                sizeof(struct ipr_ioasa_gata));
4897         }
4898
4899         LEAVE;
4900         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4901 }
4902
4903 /**
4904  * ipr_sata_reset - Reset the SATA port
4905  * @link:       SATA link to reset
4906  * @classes:    class of the attached device
4907  *
4908  * This function issues a SATA phy reset to the affected ATA link.
4909  *
4910  * Return value:
4911  *      0 on success / non-zero on failure
4912  **/
4913 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4914                                 unsigned long deadline)
4915 {
4916         struct ipr_sata_port *sata_port = link->ap->private_data;
4917         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4918         struct ipr_resource_entry *res;
4919         unsigned long lock_flags = 0;
4920         int rc = -ENXIO;
4921
4922         ENTER;
4923         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4924         while (ioa_cfg->in_reset_reload) {
4925                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4926                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4927                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4928         }
4929
4930         res = sata_port->res;
4931         if (res) {
4932                 rc = ipr_device_reset(ioa_cfg, res);
4933                 *classes = res->ata_class;
4934         }
4935
4936         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4937         LEAVE;
4938         return rc;
4939 }
4940
4941 /**
4942  * ipr_eh_dev_reset - Reset the device
4943  * @scsi_cmd:   scsi command struct
4944  *
4945  * This function issues a device reset to the affected device.
4946  * A LUN reset will be sent to the device first. If that does
4947  * not work, a target reset will be sent.
4948  *
4949  * Return value:
4950  *      SUCCESS / FAILED
4951  **/
4952 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
4953 {
4954         struct ipr_cmnd *ipr_cmd;
4955         struct ipr_ioa_cfg *ioa_cfg;
4956         struct ipr_resource_entry *res;
4957         struct ata_port *ap;
4958         int rc = 0;
4959         struct ipr_hrr_queue *hrrq;
4960
4961         ENTER;
4962         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4963         res = scsi_cmd->device->hostdata;
4964
4965         if (!res)
4966                 return FAILED;
4967
4968         /*
4969          * If we are currently going through reset/reload, return failed. This will force the
4970          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4971          * reset to complete
4972          */
4973         if (ioa_cfg->in_reset_reload)
4974                 return FAILED;
4975         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
4976                 return FAILED;
4977
4978         for_each_hrrq(hrrq, ioa_cfg) {
4979                 spin_lock(&hrrq->_lock);
4980                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4981                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4982                                 if (ipr_cmd->scsi_cmd)
4983                                         ipr_cmd->done = ipr_scsi_eh_done;
4984                                 if (ipr_cmd->qc)
4985                                         ipr_cmd->done = ipr_sata_eh_done;
4986                                 if (ipr_cmd->qc &&
4987                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4988                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4989                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4990                                 }
4991                         }
4992                 }
4993                 spin_unlock(&hrrq->_lock);
4994         }
4995         res->resetting_device = 1;
4996         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4997
4998         if (ipr_is_gata(res) && res->sata_port) {
4999                 ap = res->sata_port->ap;
5000                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5001                 ata_std_error_handler(ap);
5002                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5003
5004                 for_each_hrrq(hrrq, ioa_cfg) {
5005                         spin_lock(&hrrq->_lock);
5006                         list_for_each_entry(ipr_cmd,
5007                                             &hrrq->hrrq_pending_q, queue) {
5008                                 if (ipr_cmd->ioarcb.res_handle ==
5009                                     res->res_handle) {
5010                                         rc = -EIO;
5011                                         break;
5012                                 }
5013                         }
5014                         spin_unlock(&hrrq->_lock);
5015                 }
5016         } else
5017                 rc = ipr_device_reset(ioa_cfg, res);
5018         res->resetting_device = 0;
5019         res->reset_occurred = 1;
5020
5021         LEAVE;
5022         return rc ? FAILED : SUCCESS;
5023 }
5024
5025 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5026 {
5027         int rc;
5028
5029         spin_lock_irq(cmd->device->host->host_lock);
5030         rc = __ipr_eh_dev_reset(cmd);
5031         spin_unlock_irq(cmd->device->host->host_lock);
5032
5033         return rc;
5034 }
5035
5036 /**
5037  * ipr_bus_reset_done - Op done function for bus reset.
5038  * @ipr_cmd:    ipr command struct
5039  *
5040  * This function is the op done function for a bus reset
5041  *
5042  * Return value:
5043  *      none
5044  **/
5045 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5046 {
5047         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5048         struct ipr_resource_entry *res;
5049
5050         ENTER;
5051         if (!ioa_cfg->sis64)
5052                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5053                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5054                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5055                                 break;
5056                         }
5057                 }
5058
5059         /*
5060          * If abort has not completed, indicate the reset has, else call the
5061          * abort's done function to wake the sleeping eh thread
5062          */
5063         if (ipr_cmd->sibling->sibling)
5064                 ipr_cmd->sibling->sibling = NULL;
5065         else
5066                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5067
5068         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5069         LEAVE;
5070 }
5071
5072 /**
5073  * ipr_abort_timeout - An abort task has timed out
5074  * @ipr_cmd:    ipr command struct
5075  *
5076  * This function handles when an abort task times out. If this
5077  * happens we issue a bus reset since we have resources tied
5078  * up that must be freed before returning to the midlayer.
5079  *
5080  * Return value:
5081  *      none
5082  **/
5083 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5084 {
5085         struct ipr_cmnd *reset_cmd;
5086         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5087         struct ipr_cmd_pkt *cmd_pkt;
5088         unsigned long lock_flags = 0;
5089
5090         ENTER;
5091         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5092         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5093                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5094                 return;
5095         }
5096
5097         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5098         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5099         ipr_cmd->sibling = reset_cmd;
5100         reset_cmd->sibling = ipr_cmd;
5101         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5102         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5103         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5104         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5105         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5106
5107         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5108         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5109         LEAVE;
5110 }
5111
5112 /**
5113  * ipr_cancel_op - Cancel specified op
5114  * @scsi_cmd:   scsi command struct
5115  *
5116  * This function cancels specified op.
5117  *
5118  * Return value:
5119  *      SUCCESS / FAILED
5120  **/
5121 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5122 {
5123         struct ipr_cmnd *ipr_cmd;
5124         struct ipr_ioa_cfg *ioa_cfg;
5125         struct ipr_resource_entry *res;
5126         struct ipr_cmd_pkt *cmd_pkt;
5127         u32 ioasc, int_reg;
5128         int op_found = 0;
5129         struct ipr_hrr_queue *hrrq;
5130
5131         ENTER;
5132         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5133         res = scsi_cmd->device->hostdata;
5134
5135         /* If we are currently going through reset/reload, return failed.
5136          * This will force the mid-layer to call ipr_eh_host_reset,
5137          * which will then go to sleep and wait for the reset to complete
5138          */
5139         if (ioa_cfg->in_reset_reload ||
5140             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5141                 return FAILED;
5142         if (!res)
5143                 return FAILED;
5144
5145         /*
5146          * If we are aborting a timed out op, chances are that the timeout was caused
5147          * by a still-undetected EEH error. In such cases, reading a register will
5148          * trigger the EEH recovery infrastructure.
5149          */
5150         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5151
5152         if (!ipr_is_gscsi(res))
5153                 return FAILED;
5154
5155         for_each_hrrq(hrrq, ioa_cfg) {
5156                 spin_lock(&hrrq->_lock);
5157                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5158                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5159                                 ipr_cmd->done = ipr_scsi_eh_done;
5160                                 op_found = 1;
5161                                 break;
5162                         }
5163                 }
5164                 spin_unlock(&hrrq->_lock);
5165         }
5166
5167         if (!op_found)
5168                 return SUCCESS;
5169
5170         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5171         ipr_cmd->ioarcb.res_handle = res->res_handle;
5172         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5173         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5174         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5175         ipr_cmd->u.sdev = scsi_cmd->device;
5176
5177         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5178                     scsi_cmd->cmnd[0]);
5179         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5180         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5181
5182         /*
5183          * If the abort task timed out and we sent a bus reset, we will get
5184          * one of the following responses to the abort
5185          */
5186         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5187                 ioasc = 0;
5188                 ipr_trace;
5189         }
5190
5191         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5192         if (!ipr_is_naca_model(res))
5193                 res->needs_sync_complete = 1;
5194
5195         LEAVE;
5196         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5197 }
5198
5199 /**
5200  * ipr_eh_abort - Abort a single op
5201  * @scsi_cmd:   scsi command struct
5202  *
5203  * Return value:
5204  *      SUCCESS / FAILED
5205  **/
5206 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5207 {
5208         unsigned long flags;
5209         int rc;
5210
5211         ENTER;
5212
5213         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5214         rc = ipr_cancel_op(scsi_cmd);
5215         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5216
5217         LEAVE;
5218         return rc;
5219 }
5220
5221 /**
5222  * ipr_handle_other_interrupt - Handle "other" interrupts
5223  * @ioa_cfg:    ioa config struct
5224  * @int_reg:    interrupt register
5225  *
5226  * Return value:
5227  *      IRQ_NONE / IRQ_HANDLED
5228  **/
5229 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5230                                               u32 int_reg)
5231 {
5232         irqreturn_t rc = IRQ_HANDLED;
5233         u32 int_mask_reg;
5234
5235         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5236         int_reg &= ~int_mask_reg;
5237
5238         /* If an interrupt on the adapter did not occur, ignore it;
5239          * in the case of SIS 64, first check for a stage change interrupt.
5240          */
5241         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5242                 if (ioa_cfg->sis64) {
5243                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5244                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5245                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5246
5247                                 /* clear stage change */
5248                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5249                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5250                                 list_del(&ioa_cfg->reset_cmd->queue);
5251                                 del_timer(&ioa_cfg->reset_cmd->timer);
5252                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5253                                 return IRQ_HANDLED;
5254                         }
5255                 }
5256
5257                 return IRQ_NONE;
5258         }
5259
5260         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5261                 /* Mask the interrupt */
5262                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5263
5264                 /* Clear the interrupt */
5265                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5266                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5267
5268                 list_del(&ioa_cfg->reset_cmd->queue);
5269                 del_timer(&ioa_cfg->reset_cmd->timer);
5270                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5271         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5272                 if (ioa_cfg->clear_isr) {
5273                         if (ipr_debug && printk_ratelimit())
5274                                 dev_err(&ioa_cfg->pdev->dev,
5275                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5276                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5277                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5278                         return IRQ_NONE;
5279                 }
5280         } else {
5281                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5282                         ioa_cfg->ioa_unit_checked = 1;
5283                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5284                         dev_err(&ioa_cfg->pdev->dev,
5285                                 "No Host RRQ. 0x%08X\n", int_reg);
5286                 else
5287                         dev_err(&ioa_cfg->pdev->dev,
5288                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5289
5290                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5291                         ioa_cfg->sdt_state = GET_DUMP;
5292
5293                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5294                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5295         }
5296
5297         return rc;
5298 }
5299
5300 /**
5301  * ipr_isr_eh - Interrupt service routine error handler
5302  * @ioa_cfg:    ioa config struct
5303  * @msg:        message to log
5304  *
5305  * Return value:
5306  *      none
5307  **/
5308 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5309 {
5310         ioa_cfg->errors_logged++;
5311         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5312
5313         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5314                 ioa_cfg->sdt_state = GET_DUMP;
5315
5316         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5317 }
5318
5319 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5320                                                 struct list_head *doneq)
5321 {
5322         u32 ioasc;
5323         u16 cmd_index;
5324         struct ipr_cmnd *ipr_cmd;
5325         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5326         int num_hrrq = 0;
5327
5328         /* If interrupts are disabled, ignore the interrupt */
5329         if (!hrr_queue->allow_interrupts)
5330                 return 0;
5331
5332         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5333                hrr_queue->toggle_bit) {
5334
5335                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5336                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5337                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5338
5339                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5340                              cmd_index < hrr_queue->min_cmd_id)) {
5341                         ipr_isr_eh(ioa_cfg,
5342                                 "Invalid response handle from IOA:",
5343                                 cmd_index);
5344                         break;
5345                 }
5346
5347                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5348                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5349
5350                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5351
5352                 list_move_tail(&ipr_cmd->queue, doneq);
5353
5354                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5355                         hrr_queue->hrrq_curr++;
5356                 } else {
5357                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5358                         hrr_queue->toggle_bit ^= 1u;
5359                 }
5360                 num_hrrq++;
5361                 if (budget > 0 && num_hrrq >= budget)
5362                         break;
5363         }
5364
5365         return num_hrrq;
5366 }
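/*
 * The HRRQ is consumed with a toggle-bit protocol: the adapter flips
 * the toggle bit it writes into each entry every time the producer
 * wraps, so the host can spot fresh entries without a shared index
 * register.  A minimal model of the same scheme (names invented, not
 * driver code):
 *
 *	struct ring {
 *		volatile unsigned int *cur, *start, *end;	// end = last slot
 *		unsigned int toggle;	// expected bit, starts at 1
 *	};
 *
 *	static int ring_pop(struct ring *r, unsigned int *entry)
 *	{
 *		if ((*r->cur & 1u) != r->toggle)
 *			return 0;		// slot not produced yet
 *		*entry = *r->cur;
 *		if (r->cur < r->end) {
 *			r->cur++;
 *		} else {
 *			r->cur = r->start;	// wrap, and expect the
 *			r->toggle ^= 1u;	// opposite bit next pass
 *		}
 *		return 1;
 *	}
 */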
5367
5368 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5369 {
5370         struct ipr_ioa_cfg *ioa_cfg;
5371         struct ipr_hrr_queue *hrrq;
5372         struct ipr_cmnd *ipr_cmd, *temp;
5373         unsigned long hrrq_flags;
5374         int completed_ops;
5375         LIST_HEAD(doneq);
5376
5377         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5378         ioa_cfg = hrrq->ioa_cfg;
5379
5380         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5381         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5382
5383         if (completed_ops < budget)
5384                 blk_iopoll_complete(iop);
5385         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5386
5387         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5388                 list_del(&ipr_cmd->queue);
5389                 del_timer(&ipr_cmd->timer);
5390                 ipr_cmd->fast_done(ipr_cmd);
5391         }
5392
5393         return completed_ops;
5394 }
5395
5396 /**
5397  * ipr_isr - Interrupt service routine
5398  * @irq:        irq number
5399  * @devp:       pointer to ioa config struct
5400  *
5401  * Return value:
5402  *      IRQ_NONE / IRQ_HANDLED
5403  **/
5404 static irqreturn_t ipr_isr(int irq, void *devp)
5405 {
5406         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5407         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5408         unsigned long hrrq_flags = 0;
5409         u32 int_reg = 0;
5410         int num_hrrq = 0;
5411         int irq_none = 0;
5412         struct ipr_cmnd *ipr_cmd, *temp;
5413         irqreturn_t rc = IRQ_NONE;
5414         LIST_HEAD(doneq);
5415
5416         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5417         /* If interrupts are disabled, ignore the interrupt */
5418         if (!hrrq->allow_interrupts) {
5419                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5420                 return IRQ_NONE;
5421         }
5422
5423         while (1) {
5424                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5425                         rc = IRQ_HANDLED;
5426
5427                         if (!ioa_cfg->clear_isr)
5428                                 break;
5429
5430                         /* Clear the PCI interrupt */
5431                         num_hrrq = 0;
5432                         do {
5433                                 writel(IPR_PCII_HRRQ_UPDATED,
5434                                      ioa_cfg->regs.clr_interrupt_reg32);
5435                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5436                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5437                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5438
5439                 } else if (rc == IRQ_NONE && irq_none == 0) {
5440                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5441                         irq_none++;
5442                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5443                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5444                         ipr_isr_eh(ioa_cfg,
5445                                 "Error clearing HRRQ:", num_hrrq);
5446                         rc = IRQ_HANDLED;
5447                         break;
5448                 } else
5449                         break;
5450         }
5451
5452         if (unlikely(rc == IRQ_NONE))
5453                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5454
5455         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5456         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5457                 list_del(&ipr_cmd->queue);
5458                 del_timer(&ipr_cmd->timer);
5459                 ipr_cmd->fast_done(ipr_cmd);
5460         }
5461         return rc;
5462 }
5463
5464 /**
5465  * ipr_isr_mhrrq - Interrupt service routine
5466  * @irq:        irq number
5467  * @devp:       pointer to ioa config struct
5468  *
5469  * Return value:
5470  *      IRQ_NONE / IRQ_HANDLED
5471  **/
5472 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5473 {
5474         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5475         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5476         unsigned long hrrq_flags = 0;
5477         struct ipr_cmnd *ipr_cmd, *temp;
5478         irqreturn_t rc = IRQ_NONE;
5479         LIST_HEAD(doneq);
5480
5481         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5482
5483         /* If interrupts are disabled, ignore the interrupt */
5484         if (!hrrq->allow_interrupts) {
5485                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5486                 return IRQ_NONE;
5487         }
5488
5489         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
5490                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5491                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5492                        hrrq->toggle_bit) {
5493                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5494                                 blk_iopoll_sched(&hrrq->iopoll);
5495                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5496                         return IRQ_HANDLED;
5497                 }
5498         } else {
5499                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5500                         hrrq->toggle_bit)
5501
5502                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5503                                 rc = IRQ_HANDLED;
5504         }
5505
5506         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5507
5508         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509                 list_del(&ipr_cmd->queue);
5510                 del_timer(&ipr_cmd->timer);
5511                 ipr_cmd->fast_done(ipr_cmd);
5512         }
5513         return rc;
5514 }
5515
5516 /**
5517  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5518  * @ioa_cfg:    ioa config struct
5519  * @ipr_cmd:    ipr command struct
5520  *
5521  * Return value:
5522  *      0 on success / -1 on failure
5523  **/
5524 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5525                              struct ipr_cmnd *ipr_cmd)
5526 {
5527         int i, nseg;
5528         struct scatterlist *sg;
5529         u32 length;
5530         u32 ioadl_flags = 0;
5531         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5532         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5533         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5534
5535         length = scsi_bufflen(scsi_cmd);
5536         if (!length)
5537                 return 0;
5538
5539         nseg = scsi_dma_map(scsi_cmd);
5540         if (nseg < 0) {
5541                 if (printk_ratelimit())
5542                         dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5543                 return -1;
5544         }
5545
5546         ipr_cmd->dma_use_sg = nseg;
5547
5548         ioarcb->data_transfer_length = cpu_to_be32(length);
5549         ioarcb->ioadl_len =
5550                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5551
5552         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5553                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5554                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5555         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5556                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5557
5558         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5559                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5560                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5561                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5562         }
5563
5564         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5565         return 0;
5566 }
5567
5568 /**
5569  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5570  * @ioa_cfg:    ioa config struct
5571  * @ipr_cmd:    ipr command struct
5572  *
5573  * Return value:
5574  *      0 on success / -1 on failure
5575  **/
5576 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5577                            struct ipr_cmnd *ipr_cmd)
5578 {
5579         int i, nseg;
5580         struct scatterlist *sg;
5581         u32 length;
5582         u32 ioadl_flags = 0;
5583         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5584         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5585         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5586
5587         length = scsi_bufflen(scsi_cmd);
5588         if (!length)
5589                 return 0;
5590
5591         nseg = scsi_dma_map(scsi_cmd);
5592         if (nseg < 0) {
5593                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5594                 return -1;
5595         }
5596
5597         ipr_cmd->dma_use_sg = nseg;
5598
5599         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5600                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5601                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5602                 ioarcb->data_transfer_length = cpu_to_be32(length);
5603                 ioarcb->ioadl_len =
5604                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5605         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5606                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5607                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5608                 ioarcb->read_ioadl_len =
5609                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5610         }
5611
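             /*
              * For short S/G lists, use the IOADL space embedded in the
              * IOARCB itself; the descriptors then travel with the command
              * and the adapter need not fetch the chain separately.
              */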
5612         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5613                 ioadl = ioarcb->u.add_data.u.ioadl;
5614                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5615                                     offsetof(struct ipr_ioarcb, u.add_data));
5616                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5617         }
5618
5619         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5620                 ioadl[i].flags_and_data_len =
5621                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5622                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5623         }
5624
5625         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5626         return 0;
5627 }
5628
5629 /**
5630  * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5631  * @scsi_cmd:   scsi command struct
5632  *
5633  * Return value:
5634  *      task attributes
5635  **/
5636 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5637 {
5638         u8 tag[2];
5639         u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5640
5641         if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5642                 switch (tag[0]) {
5643                 case MSG_SIMPLE_TAG:
5644                         rc = IPR_FLAGS_LO_SIMPLE_TASK;
5645                         break;
5646                 case MSG_HEAD_TAG:
5647                         rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5648                         break;
5649                 case MSG_ORDERED_TAG:
5650                         rc = IPR_FLAGS_LO_ORDERED_TASK;
5651                         break;
5652                 }
5653         }
5654
5655         return rc;
5656 }
5657
5658 /**
5659  * ipr_erp_done - Process completion of ERP for a device
5660  * @ipr_cmd:            ipr command struct
5661  *
5662  * This function copies the sense buffer into the scsi_cmd
5663  * struct and calls the scsi_done function.
5664  *
5665  * Return value:
5666  *      nothing
5667  **/
5668 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5669 {
5670         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5671         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5672         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5673
5674         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5675                 scsi_cmd->result |= (DID_ERROR << 16);
5676                 scmd_printk(KERN_ERR, scsi_cmd,
5677                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5678         } else {
5679                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5680                        SCSI_SENSE_BUFFERSIZE);
5681         }
5682
5683         if (res) {
5684                 if (!ipr_is_naca_model(res))
5685                         res->needs_sync_complete = 1;
5686                 res->in_erp = 0;
5687         }
5688         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5689         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5690         scsi_cmd->scsi_done(scsi_cmd);
5691 }
5692
5693 /**
5694  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5695  * @ipr_cmd:    ipr command struct
5696  *
5697  * Return value:
5698  *      none
5699  **/
5700 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5701 {
5702         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5703         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5704         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5705
5706         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5707         ioarcb->data_transfer_length = 0;
5708         ioarcb->read_data_transfer_length = 0;
5709         ioarcb->ioadl_len = 0;
5710         ioarcb->read_ioadl_len = 0;
5711         ioasa->hdr.ioasc = 0;
5712         ioasa->hdr.residual_data_len = 0;
5713
5714         if (ipr_cmd->ioa_cfg->sis64)
5715                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5716                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5717         else {
5718                 ioarcb->write_ioadl_addr =
5719                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5720                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5721         }
5722 }
5723
5724 /**
5725  * ipr_erp_request_sense - Send request sense to a device
5726  * @ipr_cmd:    ipr command struct
5727  *
5728  * This function sends a request sense to a device as a result
5729  * of a check condition.
5730  *
5731  * Return value:
5732  *      nothing
5733  **/
5734 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5735 {
5736         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5737         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5738
5739         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5740                 ipr_erp_done(ipr_cmd);
5741                 return;
5742         }
5743
5744         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5745
5746         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5747         cmd_pkt->cdb[0] = REQUEST_SENSE;
5748         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5749         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5750         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5751         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5752
5753         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5754                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5755
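             /*
              * The driver-side timer passed to ipr_do_req is armed for twice
              * the adapter timeout placed in the command packet above, so
              * the IOA should time the op out and return it before the
              * host-side timer fires.
              */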
5756         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5757                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5758 }
5759
5760 /**
5761  * ipr_erp_cancel_all - Send cancel all to a device
5762  * @ipr_cmd:    ipr command struct
5763  *
5764  * This function sends a cancel all to a device to clear the
5765  * queue. If we are running TCQ on the device, QERR is set to 1,
5766  * which means all outstanding ops have been dropped on the floor.
5767  * Cancel all will return them to us.
5768  *
5769  * Return value:
5770  *      nothing
5771  **/
5772 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5773 {
5774         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5775         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5776         struct ipr_cmd_pkt *cmd_pkt;
5777
5778         res->in_erp = 1;
5779
5780         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5781
5782         if (!scsi_get_tag_type(scsi_cmd->device)) {
5783                 ipr_erp_request_sense(ipr_cmd);
5784                 return;
5785         }
5786
5787         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5788         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5789         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5790
5791         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5792                    IPR_CANCEL_ALL_TIMEOUT);
5793 }
5794
5795 /**
5796  * ipr_dump_ioasa - Dump contents of IOASA
5797  * @ioa_cfg:    ioa config struct
5798  * @ipr_cmd:    ipr command struct
5799  * @res:                resource entry struct
5800  *
5801  * This function is invoked by the interrupt handler when ops
5802  * fail. It will log the IOASA if appropriate. Only called
5803  * for GPDD ops.
5804  *
5805  * Return value:
5806  *      none
5807  **/
5808 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5809                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5810 {
5811         int i;
5812         u16 data_len;
5813         u32 ioasc, fd_ioasc;
5814         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5815         __be32 *ioasa_data = (__be32 *)ioasa;
5816         int error_index;
5817
5818         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5819         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5820
5821         if (ioasc == 0)
5822                 return;
5823
5824         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5825                 return;
5826
5827         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5828                 error_index = ipr_get_error(fd_ioasc);
5829         else
5830                 error_index = ipr_get_error(ioasc);
5831
5832         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5833                 /* Don't log an error if the IOA already logged one */
5834                 if (ioasa->hdr.ilid != 0)
5835                         return;
5836
5837                 if (!ipr_is_gscsi(res))
5838                         return;
5839
5840                 if (ipr_error_table[error_index].log_ioasa == 0)
5841                         return;
5842         }
5843
5844         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5845
5846         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5847         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5848                 data_len = sizeof(struct ipr_ioasa64);
5849         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5850                 data_len = sizeof(struct ipr_ioasa);
5851
5852         ipr_err("IOASA Dump:\n");
5853
5854         for (i = 0; i < data_len / 4; i += 4) {
5855                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5856                         be32_to_cpu(ioasa_data[i]),
5857                         be32_to_cpu(ioasa_data[i+1]),
5858                         be32_to_cpu(ioasa_data[i+2]),
5859                         be32_to_cpu(ioasa_data[i+3]));
5860         }
5861 }
5862
5863 /**
5864  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5865  * @ipr_cmd:    ipr command struct
5867  *
5868  * Return value:
5869  *      none
5870  **/
5871 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5872 {
5873         u32 failing_lba;
5874         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5875         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5876         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5877         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5878
5879         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5880
5881         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5882                 return;
5883
5884         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5885
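             /*
              * A vset failing LBA wider than 32 bits does not fit in
              * fixed-format sense, so build descriptor-format sense (0x72)
              * carrying an information descriptor:
              *   buf[8]  = 0x00   descriptor type: information
              *   buf[9]  = 0x0A   additional descriptor length
              *   buf[10] = 0x80   VALID bit
              *   buf[12..19]      64-bit failing LBA, big endian
              */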
5886         if (ipr_is_vset_device(res) &&
5887             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5888             ioasa->u.vset.failing_lba_hi != 0) {
5889                 sense_buf[0] = 0x72;
5890                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5891                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5892                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5893
5894                 sense_buf[7] = 12;
5895                 sense_buf[8] = 0;
5896                 sense_buf[9] = 0x0A;
5897                 sense_buf[10] = 0x80;
5898
5899                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5900
5901                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5902                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5903                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5904                 sense_buf[15] = failing_lba & 0x000000ff;
5905
5906                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5907
5908                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5909                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5910                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5911                 sense_buf[19] = failing_lba & 0x000000ff;
5912         } else {
5913                 sense_buf[0] = 0x70;
5914                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5915                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5916                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5917
5918                 /* Illegal request */
5919                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5920                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5921                         sense_buf[7] = 10;      /* additional length */
5922
5923                         /* IOARCB was in error */
5924                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5925                                 sense_buf[15] = 0xC0;
5926                         else    /* Parameter data was invalid */
5927                                 sense_buf[15] = 0x80;
5928
5929                         sense_buf[16] =
5930                             ((IPR_FIELD_POINTER_MASK &
5931                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5932                         sense_buf[17] =
5933                             (IPR_FIELD_POINTER_MASK &
5934                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5935                 } else {
5936                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5937                                 if (ipr_is_vset_device(res))
5938                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5939                                 else
5940                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5941
5942                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
5943                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5944                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5945                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5946                                 sense_buf[6] = failing_lba & 0x000000ff;
5947                         }
5948
5949                         sense_buf[7] = 6;       /* additional length */
5950                 }
5951         }
5952 }
5953
5954 /**
5955  * ipr_get_autosense - Copy autosense data to sense buffer
5956  * @ipr_cmd:    ipr command struct
5957  *
5958  * This function copies the autosense buffer to the buffer
5959  * in the scsi_cmd, if there is autosense available.
5960  *
5961  * Return value:
5962  *      1 if autosense was available / 0 if not
5963  **/
5964 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5965 {
5966         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5967         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5968
5969         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5970                 return 0;
5971
5972         if (ipr_cmd->ioa_cfg->sis64)
5973                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5974                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5975                            SCSI_SENSE_BUFFERSIZE));
5976         else
5977                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5978                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5979                            SCSI_SENSE_BUFFERSIZE));
5980         return 1;
5981 }
5982
5983 /**
5984  * ipr_erp_start - Process an error response for a SCSI op
5985  * @ioa_cfg:    ioa config struct
5986  * @ipr_cmd:    ipr command struct
5987  *
5988  * This function determines whether or not to initiate ERP
5989  * on the affected device.
5990  *
5991  * Return value:
5992  *      nothing
5993  **/
5994 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5995                               struct ipr_cmnd *ipr_cmd)
5996 {
5997         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5998         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5999         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6000         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6001
6002         if (!res) {
6003                 ipr_scsi_eh_done(ipr_cmd);
6004                 return;
6005         }
6006
6007         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6008                 ipr_gen_sense(ipr_cmd);
6009
6010         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6011
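             /*
              * Map the masked IOASC onto a mid-layer result: transient
              * conditions become DID_IMM_RETRY, dead paths DID_NO_CONNECT,
              * and device bus status is passed straight through so check
              * conditions reach the mid-layer with their sense data.
              */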
6012         switch (masked_ioasc) {
6013         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6014                 if (ipr_is_naca_model(res))
6015                         scsi_cmd->result |= (DID_ABORT << 16);
6016                 else
6017                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6018                 break;
6019         case IPR_IOASC_IR_RESOURCE_HANDLE:
6020         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6021                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6022                 break;
6023         case IPR_IOASC_HW_SEL_TIMEOUT:
6024                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6025                 if (!ipr_is_naca_model(res))
6026                         res->needs_sync_complete = 1;
6027                 break;
6028         case IPR_IOASC_SYNC_REQUIRED:
6029                 if (!res->in_erp)
6030                         res->needs_sync_complete = 1;
6031                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6032                 break;
6033         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6034         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6035                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6036                 break;
6037         case IPR_IOASC_BUS_WAS_RESET:
6038         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6039                 /*
6040                  * Report the bus reset and ask for a retry. The device
6041                  * will return CC/UA on the next command.
6042                  */
6043                 if (!res->resetting_device)
6044                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6045                 scsi_cmd->result |= (DID_ERROR << 16);
6046                 if (!ipr_is_naca_model(res))
6047                         res->needs_sync_complete = 1;
6048                 break;
6049         case IPR_IOASC_HW_DEV_BUS_STATUS:
6050                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6051                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6052                         if (!ipr_get_autosense(ipr_cmd)) {
6053                                 if (!ipr_is_naca_model(res)) {
6054                                         ipr_erp_cancel_all(ipr_cmd);
6055                                         return;
6056                                 }
6057                         }
6058                 }
6059                 if (!ipr_is_naca_model(res))
6060                         res->needs_sync_complete = 1;
6061                 break;
6062         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6063                 break;
6064         default:
6065                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6066                         scsi_cmd->result |= (DID_ERROR << 16);
6067                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6068                         res->needs_sync_complete = 1;
6069                 break;
6070         }
6071
6072         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6073         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6074         scsi_cmd->scsi_done(scsi_cmd);
6075 }
6076
6077 /**
6078  * ipr_scsi_done - mid-layer done function
6079  * @ipr_cmd:    ipr command struct
6080  *
6081  * This function is invoked by the interrupt handler for
6082  * ops generated by the SCSI mid-layer
6083  *
6084  * Return value:
6085  *      none
6086  **/
6087 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6088 {
6089         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6090         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6091         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6092         unsigned long hrrq_flags;
6093
6094         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6095
6096         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6097                 scsi_dma_unmap(scsi_cmd);
6098
6099                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6100                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6101                 scsi_cmd->scsi_done(scsi_cmd);
6102                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6103         } else {
6104                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6105                 ipr_erp_start(ioa_cfg, ipr_cmd);
6106                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6107         }
6108 }
6109
6110 /**
6111  * ipr_queuecommand - Queue a mid-layer request
6112  * @shost:              scsi host struct
6113  * @scsi_cmd:   scsi command struct
6114  *
6115  * This function queues a request generated by the mid-layer.
6116  *
6117  * Return value:
6118  *      0 on success
6119  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6120  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6121  **/
6122 static int ipr_queuecommand(struct Scsi_Host *shost,
6123                             struct scsi_cmnd *scsi_cmd)
6124 {
6125         struct ipr_ioa_cfg *ioa_cfg;
6126         struct ipr_resource_entry *res;
6127         struct ipr_ioarcb *ioarcb;
6128         struct ipr_cmnd *ipr_cmd;
6129         unsigned long hrrq_flags, lock_flags;
6130         int rc;
6131         struct ipr_hrr_queue *hrrq;
6132         int hrrq_id;
6133
6134         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6135
6136         scsi_cmd->result = (DID_OK << 16);
6137         res = scsi_cmd->device->hostdata;
6138
6139         if (ipr_is_gata(res) && res->sata_port) {
6140                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6141                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6143                 return rc;
6144         }
6145
6146         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6147         hrrq = &ioa_cfg->hrrq[hrrq_id];
6148
6149         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6150         /*
6151          * We are currently blocking all devices due to a host reset.
6152          * We have told the host to stop giving us new requests, but
6153          * ERP ops don't count. FIXME
6154          */
6155         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6156                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6157                 return SCSI_MLQUEUE_HOST_BUSY;
6158         }
6159
6160         /*
6161          * FIXME - Create scsi_set_host_offline interface
6162          *  and the ioa_is_dead check can be removed
6163          */
6164         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6165                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6166                 goto err_nodev;
6167         }
6168
6169         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6170         if (ipr_cmd == NULL) {
6171                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6172                 return SCSI_MLQUEUE_HOST_BUSY;
6173         }
6174         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6175
6176         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6177         ioarcb = &ipr_cmd->ioarcb;
6178
6179         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6180         ipr_cmd->scsi_cmd = scsi_cmd;
6181         ipr_cmd->done = ipr_scsi_eh_done;
6182
6183         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6184                 if (scsi_cmd->underflow == 0)
6185                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6186
6187                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6188                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6189                         res->reset_occurred = 0;
6190                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6191                 }
6192                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6193                 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6194         }
6195
6196         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6197             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6198                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6199         }
6200
6201         if (ioa_cfg->sis64)
6202                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6203         else
6204                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6205
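             /*
              * The S/G list above was built without the HRRQ lock held, so
              * recheck allow_cmds and ioa_is_dead under the lock before
              * handing the op to the adapter; a reset may have started in
              * the meantime.
              */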
6206         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6207         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6208                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6209                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6210                 if (!rc)
6211                         scsi_dma_unmap(scsi_cmd);
6212                 return SCSI_MLQUEUE_HOST_BUSY;
6213         }
6214
6215         if (unlikely(hrrq->ioa_is_dead)) {
6216                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6217                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6218                 scsi_dma_unmap(scsi_cmd);
6219                 goto err_nodev;
6220         }
6221
6222         ioarcb->res_handle = res->res_handle;
6223         if (res->needs_sync_complete) {
6224                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6225                 res->needs_sync_complete = 0;
6226         }
6227         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6228         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6229         ipr_send_command(ipr_cmd);
6230         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6231         return 0;
6232
6233 err_nodev:
6234         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6235         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6236         scsi_cmd->result = (DID_NO_CONNECT << 16);
6237         scsi_cmd->scsi_done(scsi_cmd);
6238         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6239         return 0;
6240 }
6241
6242 /**
6243  * ipr_ioctl - IOCTL handler
6244  * @sdev:       scsi device struct
6245  * @cmd:        IOCTL cmd
6246  * @arg:        IOCTL arg
6247  *
6248  * Return value:
6249  *      0 on success / other on failure
6250  **/
6251 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6252 {
6253         struct ipr_resource_entry *res;
6254
6255         res = (struct ipr_resource_entry *)sdev->hostdata;
6256         if (res && ipr_is_gata(res)) {
6257                 if (cmd == HDIO_GET_IDENTITY)
6258                         return -ENOTTY;
6259                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6260         }
6261
6262         return -EINVAL;
6263 }
6264
6265 /**
6266  * ipr_ioa_info - Get information about the card/driver
6267  * @host:       scsi host struct
6268  *
6269  * Return value:
6270  *      pointer to buffer with description string
6271  **/
6272 static const char *ipr_ioa_info(struct Scsi_Host *host)
6273 {
6274         static char buffer[512];
6275         struct ipr_ioa_cfg *ioa_cfg;
6276         unsigned long lock_flags = 0;
6277
6278         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6279
6280         spin_lock_irqsave(host->host_lock, lock_flags);
6281         snprintf(buffer, sizeof(buffer), "IBM %X Storage Adapter", ioa_cfg->type);
6282         spin_unlock_irqrestore(host->host_lock, lock_flags);
6283
6284         return buffer;
6285 }
6286
6287 static struct scsi_host_template driver_template = {
6288         .module = THIS_MODULE,
6289         .name = "IPR",
6290         .info = ipr_ioa_info,
6291         .ioctl = ipr_ioctl,
6292         .queuecommand = ipr_queuecommand,
6293         .eh_abort_handler = ipr_eh_abort,
6294         .eh_device_reset_handler = ipr_eh_dev_reset,
6295         .eh_host_reset_handler = ipr_eh_host_reset,
6296         .slave_alloc = ipr_slave_alloc,
6297         .slave_configure = ipr_slave_configure,
6298         .slave_destroy = ipr_slave_destroy,
6299         .target_alloc = ipr_target_alloc,
6300         .target_destroy = ipr_target_destroy,
6301         .change_queue_depth = ipr_change_queue_depth,
6302         .change_queue_type = ipr_change_queue_type,
6303         .bios_param = ipr_biosparam,
6304         .can_queue = IPR_MAX_COMMANDS,
6305         .this_id = -1,
6306         .sg_tablesize = IPR_MAX_SGLIST,
6307         .max_sectors = IPR_IOA_MAX_SECTORS,
6308         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6309         .use_clustering = ENABLE_CLUSTERING,
6310         .shost_attrs = ipr_ioa_attrs,
6311         .sdev_attrs = ipr_dev_attrs,
6312         .proc_name = IPR_NAME,
6313         .no_write_same = 1,
6314 };
6315
6316 /**
6317  * ipr_ata_phy_reset - libata phy_reset handler
6318  * @ap:         ata port to reset
6319  *
6320  **/
6321 static void ipr_ata_phy_reset(struct ata_port *ap)
6322 {
6323         unsigned long flags;
6324         struct ipr_sata_port *sata_port = ap->private_data;
6325         struct ipr_resource_entry *res = sata_port->res;
6326         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6327         int rc;
6328
6329         ENTER;
6330         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6331         while (ioa_cfg->in_reset_reload) {
6332                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6333                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6334                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6335         }
6336
6337         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6338                 goto out_unlock;
6339
6340         rc = ipr_device_reset(ioa_cfg, res);
6341
6342         if (rc) {
6343                 ap->link.device[0].class = ATA_DEV_NONE;
6344                 goto out_unlock;
6345         }
6346
6347         ap->link.device[0].class = res->ata_class;
6348         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6349                 ap->link.device[0].class = ATA_DEV_NONE;
6350
6351 out_unlock:
6352         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6353         LEAVE;
6354 }
6355
6356 /**
6357  * ipr_ata_post_internal - Cleanup after an internal command
6358  * @qc: ATA queued command
6359  *
6360  * Return value:
6361  *      none
6362  **/
6363 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6364 {
6365         struct ipr_sata_port *sata_port = qc->ap->private_data;
6366         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6367         struct ipr_cmnd *ipr_cmd;
6368         struct ipr_hrr_queue *hrrq;
6369         unsigned long flags;
6370
6371         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6372         while (ioa_cfg->in_reset_reload) {
6373                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6374                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6375                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6376         }
6377
6378         for_each_hrrq(hrrq, ioa_cfg) {
6379                 spin_lock(&hrrq->_lock);
6380                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6381                         if (ipr_cmd->qc == qc) {
6382                                 ipr_device_reset(ioa_cfg, sata_port->res);
6383                                 break;
6384                         }
6385                 }
6386                 spin_unlock(&hrrq->_lock);
6387         }
6388         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6389 }
6390
6391 /**
6392  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6393  * @regs:       destination
6394  * @tf: source ATA taskfile
6395  *
6396  * Return value:
6397  *      none
6398  **/
6399 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6400                              struct ata_taskfile *tf)
6401 {
6402         regs->feature = tf->feature;
6403         regs->nsect = tf->nsect;
6404         regs->lbal = tf->lbal;
6405         regs->lbam = tf->lbam;
6406         regs->lbah = tf->lbah;
6407         regs->device = tf->device;
6408         regs->command = tf->command;
6409         regs->hob_feature = tf->hob_feature;
6410         regs->hob_nsect = tf->hob_nsect;
6411         regs->hob_lbal = tf->hob_lbal;
6412         regs->hob_lbam = tf->hob_lbam;
6413         regs->hob_lbah = tf->hob_lbah;
6414         regs->ctl = tf->ctl;
6415 }
6416
6417 /**
6418  * ipr_sata_done - done function for SATA commands
6419  * @ipr_cmd:    ipr command struct
6420  *
6421  * This function is invoked by the interrupt handler for
6422  * ops generated by the SCSI mid-layer to SATA devices
6423  *
6424  * Return value:
6425  *      none
6426  **/
6427 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6428 {
6429         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6430         struct ata_queued_cmd *qc = ipr_cmd->qc;
6431         struct ipr_sata_port *sata_port = qc->ap->private_data;
6432         struct ipr_resource_entry *res = sata_port->res;
6433         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6434
6435         spin_lock(&ipr_cmd->hrrq->_lock);
6436         if (ipr_cmd->ioa_cfg->sis64)
6437                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6438                        sizeof(struct ipr_ioasa_gata));
6439         else
6440                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6441                        sizeof(struct ipr_ioasa_gata));
6442         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6443
6444         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6445                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6446
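             /*
              * If the IOASC indicates a real failure, use __ac_err_mask so
              * qc->err_mask is non-zero (AC_ERR_OTHER) even when the ATA
              * status register looks clean, keeping libata from treating
              * the command as successful.
              */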
6447         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6448                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6449         else
6450                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6451         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6452         spin_unlock(&ipr_cmd->hrrq->_lock);
6453         ata_qc_complete(qc);
6454 }
6455
6456 /**
6457  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6458  * @ipr_cmd:    ipr command struct
6459  * @qc:         ATA queued command
6460  *
6461  **/
6462 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6463                                   struct ata_queued_cmd *qc)
6464 {
6465         u32 ioadl_flags = 0;
6466         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6467         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6468         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6469         int len = qc->nbytes;
6470         struct scatterlist *sg;
6471         unsigned int si;
6472         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6473
6474         if (len == 0)
6475                 return;
6476
6477         if (qc->dma_dir == DMA_TO_DEVICE) {
6478                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6479                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6480         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6481                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6482
6483         ioarcb->data_transfer_length = cpu_to_be32(len);
6484         ioarcb->ioadl_len =
6485                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6486         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6487                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6488
6489         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6490                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6491                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6492                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6493
6494                 last_ioadl64 = ioadl64;
6495                 ioadl64++;
6496         }
6497
6498         if (likely(last_ioadl64))
6499                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6500 }
6501
6502 /**
6503  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6504  * @ipr_cmd:    ipr command struct
6505  * @qc:         ATA queued command
6506  *
6507  **/
6508 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6509                                 struct ata_queued_cmd *qc)
6510 {
6511         u32 ioadl_flags = 0;
6512         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6513         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6514         struct ipr_ioadl_desc *last_ioadl = NULL;
6515         int len = qc->nbytes;
6516         struct scatterlist *sg;
6517         unsigned int si;
6518
6519         if (len == 0)
6520                 return;
6521
6522         if (qc->dma_dir == DMA_TO_DEVICE) {
6523                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6524                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6525                 ioarcb->data_transfer_length = cpu_to_be32(len);
6526                 ioarcb->ioadl_len =
6527                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6528         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6529                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6530                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6531                 ioarcb->read_ioadl_len =
6532                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6533         }
6534
6535         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6536                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6537                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6538
6539                 last_ioadl = ioadl;
6540                 ioadl++;
6541         }
6542
6543         if (likely(last_ioadl))
6544                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6545 }
6546
6547 /**
6548  * ipr_qc_defer - Get a free ipr_cmd
6549  * @qc: queued command
6550  *
6551  * Return value:
6552  *      0 on success / ATA_DEFER_LINK if the adapter cannot accept the command
6553  **/
6554 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6555 {
6556         struct ata_port *ap = qc->ap;
6557         struct ipr_sata_port *sata_port = ap->private_data;
6558         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6559         struct ipr_cmnd *ipr_cmd;
6560         struct ipr_hrr_queue *hrrq;
6561         int hrrq_id;
6562
6563         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6564         hrrq = &ioa_cfg->hrrq[hrrq_id];
6565
6566         qc->lldd_task = NULL;
6567         spin_lock(&hrrq->_lock);
6568         if (unlikely(hrrq->ioa_is_dead)) {
6569                 spin_unlock(&hrrq->_lock);
6570                 return 0;
6571         }
6572
6573         if (unlikely(!hrrq->allow_cmds)) {
6574                 spin_unlock(&hrrq->_lock);
6575                 return ATA_DEFER_LINK;
6576         }
6577
6578         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6579         if (ipr_cmd == NULL) {
6580                 spin_unlock(&hrrq->_lock);
6581                 return ATA_DEFER_LINK;
6582         }
6583
6584         qc->lldd_task = ipr_cmd;
6585         spin_unlock(&hrrq->_lock);
6586         return 0;
6587 }
6588
6589 /**
6590  * ipr_qc_issue - Issue a SATA qc to a device
6591  * @qc: queued command
6592  *
6593  * Return value:
6594  *      0 on success / AC_ERR_* on failure
6595  **/
6596 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6597 {
6598         struct ata_port *ap = qc->ap;
6599         struct ipr_sata_port *sata_port = ap->private_data;
6600         struct ipr_resource_entry *res = sata_port->res;
6601         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6602         struct ipr_cmnd *ipr_cmd;
6603         struct ipr_ioarcb *ioarcb;
6604         struct ipr_ioarcb_ata_regs *regs;
6605
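             /*
              * ipr_qc_defer normally stashes a reserved command block in
              * qc->lldd_task; retry the reservation here if it is missing.
              * A NULL after that means the HRRQ is dead or exhausted, so
              * fail the qc rather than block.
              */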
6606         if (qc->lldd_task == NULL)
6607                 ipr_qc_defer(qc);
6608
6609         ipr_cmd = qc->lldd_task;
6610         if (ipr_cmd == NULL)
6611                 return AC_ERR_SYSTEM;
6612
6613         qc->lldd_task = NULL;
6614         spin_lock(&ipr_cmd->hrrq->_lock);
6615         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6616                         ipr_cmd->hrrq->ioa_is_dead)) {
6617                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6618                 spin_unlock(&ipr_cmd->hrrq->_lock);
6619                 return AC_ERR_SYSTEM;
6620         }
6621
6622         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6623         ioarcb = &ipr_cmd->ioarcb;
6624
6625         if (ioa_cfg->sis64) {
6626                 regs = &ipr_cmd->i.ata_ioadl.regs;
6627                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6628         } else
6629                 regs = &ioarcb->u.add_data.u.regs;
6630
6631         memset(regs, 0, sizeof(*regs));
6632         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6633
6634         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6635         ipr_cmd->qc = qc;
6636         ipr_cmd->done = ipr_sata_done;
6637         ipr_cmd->ioarcb.res_handle = res->res_handle;
6638         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6639         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6640         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6641         ipr_cmd->dma_use_sg = qc->n_elem;
6642
6643         if (ioa_cfg->sis64)
6644                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6645         else
6646                 ipr_build_ata_ioadl(ipr_cmd, qc);
6647
6648         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6649         ipr_copy_sata_tf(regs, &qc->tf);
6650         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6651         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6652
6653         switch (qc->tf.protocol) {
6654         case ATA_PROT_NODATA:
6655         case ATA_PROT_PIO:
6656                 break;
6657
6658         case ATA_PROT_DMA:
6659                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6660                 break;
6661
6662         case ATAPI_PROT_PIO:
6663         case ATAPI_PROT_NODATA:
6664                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6665                 break;
6666
6667         case ATAPI_PROT_DMA:
6668                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6669                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6670                 break;
6671
6672         default:
6673                 WARN_ON(1);
6674                 spin_unlock(&ipr_cmd->hrrq->_lock);
6675                 return AC_ERR_INVALID;
6676         }
6677
6678         ipr_send_command(ipr_cmd);
6679         spin_unlock(&ipr_cmd->hrrq->_lock);
6680
6681         return 0;
6682 }
6683
6684 /**
6685  * ipr_qc_fill_rtf - Read result TF
6686  * @qc: ATA queued command
6687  *
6688  * Return value:
6689  *      true
6690  **/
6691 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6692 {
6693         struct ipr_sata_port *sata_port = qc->ap->private_data;
6694         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6695         struct ata_taskfile *tf = &qc->result_tf;
6696
6697         tf->feature = g->error;
6698         tf->nsect = g->nsect;
6699         tf->lbal = g->lbal;
6700         tf->lbam = g->lbam;
6701         tf->lbah = g->lbah;
6702         tf->device = g->device;
6703         tf->command = g->status;
6704         tf->hob_nsect = g->hob_nsect;
6705         tf->hob_lbal = g->hob_lbal;
6706         tf->hob_lbam = g->hob_lbam;
6707         tf->hob_lbah = g->hob_lbah;
6708
6709         return true;
6710 }
6711
6712 static struct ata_port_operations ipr_sata_ops = {
6713         .phy_reset = ipr_ata_phy_reset,
6714         .hardreset = ipr_sata_reset,
6715         .post_internal_cmd = ipr_ata_post_internal,
6716         .qc_prep = ata_noop_qc_prep,
6717         .qc_defer = ipr_qc_defer,
6718         .qc_issue = ipr_qc_issue,
6719         .qc_fill_rtf = ipr_qc_fill_rtf,
6720         .port_start = ata_sas_port_start,
6721         .port_stop = ata_sas_port_stop
6722 };
6723
6724 static struct ata_port_info sata_port_info = {
6725         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6726         .pio_mask       = ATA_PIO4_ONLY,
6727         .mwdma_mask     = ATA_MWDMA2,
6728         .udma_mask      = ATA_UDMA6,
6729         .port_ops       = &ipr_sata_ops
6730 };
6731
6732 #ifdef CONFIG_PPC_PSERIES
6733 static const u16 ipr_blocked_processors[] = {
6734         PVR_NORTHSTAR,
6735         PVR_PULSAR,
6736         PVR_POWER4,
6737         PVR_ICESTAR,
6738         PVR_SSTAR,
6739         PVR_POWER4p,
6740         PVR_630,
6741         PVR_630p
6742 };
6743
6744 /**
6745  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6746  * @ioa_cfg:    ioa cfg struct
6747  *
6748  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6749  * certain pSeries hardware. This function determines if the given
6750  * adapter is in one of these configurations or not.
6751  *
6752  * Return value:
6753  *      1 if adapter is not supported / 0 if adapter is supported
6754  **/
6755 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6756 {
6757         int i;
6758
6759         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6760                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6761                         if (pvr_version_is(ipr_blocked_processors[i]))
6762                                 return 1;
6763                 }
6764         }
6765         return 0;
6766 }
6767 #else
6768 #define ipr_invalid_adapter(ioa_cfg) 0
6769 #endif
6770
6771 /**
6772  * ipr_ioa_bringdown_done - IOA bring down completion.
6773  * @ipr_cmd:    ipr command struct
6774  *
6775  * This function processes the completion of an adapter bring down.
6776  * It wakes any reset sleepers.
6777  *
6778  * Return value:
6779  *      IPR_RC_JOB_RETURN
6780  **/
6781 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6782 {
6783         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6784         int i;
6785
6786         ENTER;
6787         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6788                 ipr_trace;
6789                 spin_unlock_irq(ioa_cfg->host->host_lock);
6790                 scsi_unblock_requests(ioa_cfg->host);
6791                 spin_lock_irq(ioa_cfg->host->host_lock);
6792         }
6793
6794         ioa_cfg->in_reset_reload = 0;
6795         ioa_cfg->reset_retries = 0;
6796         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6797                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6798                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6799                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6800         }
6801         wmb();
6802
6803         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6804         wake_up_all(&ioa_cfg->reset_wait_q);
6805         LEAVE;
6806
6807         return IPR_RC_JOB_RETURN;
6808 }
6809
6810 /**
6811  * ipr_ioa_reset_done - IOA reset completion.
6812  * @ipr_cmd:    ipr command struct
6813  *
6814  * This function processes the completion of an adapter reset.
6815  * It schedules any necessary mid-layer add/removes and
6816  * wakes any reset sleepers.
6817  *
6818  * Return value:
6819  *      IPR_RC_JOB_RETURN
6820  **/
6821 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6822 {
6823         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6824         struct ipr_resource_entry *res;
6825         struct ipr_hostrcb *hostrcb, *temp;
6826         int i = 0, j;
6827
6828         ENTER;
6829         ioa_cfg->in_reset_reload = 0;
6830         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6831                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6832                 ioa_cfg->hrrq[j].allow_cmds = 1;
6833                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6834         }
6835         wmb();
6836         ioa_cfg->reset_cmd = NULL;
6837         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6838
6839         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6840                 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6841                         ipr_trace;
6842                         break;
6843                 }
6844         }
6845         schedule_work(&ioa_cfg->work_q);
6846
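             /*
              * Re-arm the HCAMs that were quiesced for the reset: the first
              * IPR_NUM_LOG_HCAMS free buffers go back out as log-data HCAMs
              * and the remainder as configuration-change HCAMs.
              */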
6847         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6848                 list_del(&hostrcb->queue);
6849                 if (i++ < IPR_NUM_LOG_HCAMS)
6850                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6851                 else
6852                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6853         }
6854
6855         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6856         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6857
6858         ioa_cfg->reset_retries = 0;
6859         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6860         wake_up_all(&ioa_cfg->reset_wait_q);
6861
6862         spin_unlock(ioa_cfg->host->host_lock);
6863         scsi_unblock_requests(ioa_cfg->host);
6864         spin_lock(ioa_cfg->host->host_lock);
6865
6866         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6867                 scsi_block_requests(ioa_cfg->host);
6868
6869         LEAVE;
6870         return IPR_RC_JOB_RETURN;
6871 }
6872
6873 /**
6874  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6875  * @supported_dev:      supported device struct
6876  * @vpids:                      vendor product id struct
6877  *
6878  * Return value:
6879  *      none
6880  **/
6881 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6882                                  struct ipr_std_inq_vpids *vpids)
6883 {
6884         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6885         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6886         supported_dev->num_records = 1;
6887         supported_dev->data_length =
6888                 cpu_to_be16(sizeof(struct ipr_supported_device));
6889         supported_dev->reserved = 0;
6890 }
6891
6892 /**
6893  * ipr_set_supported_devs - Send Set Supported Devices for a device
6894  * @ipr_cmd:    ipr command struct
6895  *
6896  * This function sends a Set Supported Devices to the adapter
6897  *
6898  * Return value:
6899  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6900  **/
6901 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6902 {
6903         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6904         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6905         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6906         struct ipr_resource_entry *res = ipr_cmd->u.res;
6907
6908         ipr_cmd->job_step = ipr_ioa_reset_done;
6909
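             /*
              * One Set Supported Devices is sent per invocation, resuming
              * from ipr_cmd->u.res.  On non-SIS64 adapters job_step is
              * pointed back here below so the scan continues after each
              * completion; once the list is exhausted we fall through to
              * ipr_ioa_reset_done.
              */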
6910         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6911                 if (!ipr_is_scsi_disk(res))
6912                         continue;
6913
6914                 ipr_cmd->u.res = res;
6915                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6916
6917                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6918                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6919                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6920
6921                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6922                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6923                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6924                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6925
6926                 ipr_init_ioadl(ipr_cmd,
6927                                ioa_cfg->vpd_cbs_dma +
6928                                  offsetof(struct ipr_misc_cbs, supp_dev),
6929                                sizeof(struct ipr_supported_device),
6930                                IPR_IOADL_FLAGS_WRITE_LAST);
6931
6932                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6933                            IPR_SET_SUP_DEVICE_TIMEOUT);
6934
6935                 if (!ioa_cfg->sis64)
6936                         ipr_cmd->job_step = ipr_set_supported_devs;
6937                 LEAVE;
6938                 return IPR_RC_JOB_RETURN;
6939         }
6940
6941         LEAVE;
6942         return IPR_RC_JOB_CONTINUE;
6943 }
6944
6945 /**
6946  * ipr_get_mode_page - Locate specified mode page
6947  * @mode_pages: mode page buffer
6948  * @page_code:  page code to find
6949  * @len:                minimum required length for mode page
6950  *
6951  * Return value:
6952  *      pointer to mode page / NULL on failure
6953  **/
6954 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6955                                u32 page_code, u32 len)
6956 {
6957         struct ipr_mode_page_hdr *mode_hdr;
6958         u32 page_length;
6959         u32 length;
6960
6961         if (!mode_pages || (mode_pages->hdr.length == 0))
6962                 return NULL;
6963
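             /*
              * hdr.length counts the bytes that follow it, so hdr.length + 1
              * is the total mode data length; subtracting the 4-byte mode
              * parameter header and any block descriptors leaves the bytes
              * of mode page data to walk below.
              */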
6964         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6965         mode_hdr = (struct ipr_mode_page_hdr *)
6966                 (mode_pages->data + mode_pages->hdr.block_desc_len);
6967
6968         while (length) {
6969                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6970                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6971                                 return mode_hdr;
6972                         break;
6973                 } else {
6974                         page_length = (sizeof(struct ipr_mode_page_hdr) +
6975                                        mode_hdr->page_length);
6976                         length -= page_length;
6977                         mode_hdr = (struct ipr_mode_page_hdr *)
6978                                 ((unsigned long)mode_hdr + page_length);
6979                 }
6980         }
6981         return NULL;
6982 }
6983
6984 /**
6985  * ipr_check_term_power - Check for term power errors
6986  * @ioa_cfg:    ioa config struct
6987  * @mode_pages: IOAFP mode pages buffer
6988  *
6989  * Check the IOAFP's mode page 28 for term power errors
6990  *
6991  * Return value:
6992  *      nothing
6993  **/
6994 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6995                                  struct ipr_mode_pages *mode_pages)
6996 {
6997         int i;
6998         int entry_length;
6999         struct ipr_dev_bus_entry *bus;
7000         struct ipr_mode_page28 *mode_page;
7001
7002         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7003                                       sizeof(struct ipr_mode_page28));
7004
7005         entry_length = mode_page->entry_length;
7006
7007         bus = mode_page->bus;
7008
7009         for (i = 0; i < mode_page->num_entries; i++) {
7010                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7011                         dev_err(&ioa_cfg->pdev->dev,
7012                                 "Term power is absent on scsi bus %d\n",
7013                                 bus->res_addr.bus);
7014                 }
7015
7016                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7017         }
7018 }
7019
7020 /**
7021  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7022  * @ioa_cfg:    ioa config struct
7023  *
7024  * Looks through the config table for SES devices. If an SES
7025  * device appears in the SES table with a maximum SCSI bus speed,
7026  * the bus is limited to that speed.
7027  *
7028  * Return value:
7029  *      none
7030  **/
7031 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7032 {
7033         u32 max_xfer_rate;
7034         int i;
7035
7036         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7037                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7038                                                        ioa_cfg->bus_attr[i].bus_width);
7039
7040                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7041                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7042         }
7043 }
7044
7045 /**
7046  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7047  * @ioa_cfg:    ioa config struct
7048  * @mode_pages: mode page 28 buffer
7049  *
7050  * Updates mode page 28 based on driver configuration
7051  *
7052  * Return value:
7053  *      none
7054  **/
7055 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7056                                           struct ipr_mode_pages *mode_pages)
7057 {
7058         int i, entry_length;
7059         struct ipr_dev_bus_entry *bus;
7060         struct ipr_bus_attributes *bus_attr;
7061         struct ipr_mode_page28 *mode_page;
7062
7063         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7064                                       sizeof(struct ipr_mode_page28));
7065
7066         entry_length = mode_page->entry_length;
7067
7068         /* Loop for each device bus entry */
7069         for (i = 0, bus = mode_page->bus;
7070              i < mode_page->num_entries;
7071              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7072                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7073                         dev_err(&ioa_cfg->pdev->dev,
7074                                 "Invalid resource address reported: 0x%08X\n",
7075                                 IPR_GET_PHYS_LOC(bus->res_addr));
7076                         continue;
7077                 }
7078
7079                 bus_attr = &ioa_cfg->bus_attr[i];
7080                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7081                 bus->bus_width = bus_attr->bus_width;
7082                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7083                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7084                 if (bus_attr->qas_enabled)
7085                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7086                 else
7087                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7088         }
7089 }
7090
7091 /**
7092  * ipr_build_mode_select - Build a mode select command
7093  * @ipr_cmd:    ipr command struct
7094  * @res_handle: resource handle to send command to
7095  * @parm:               Byte 2 of Mode Select command
7096  * @dma_addr:   DMA buffer address
7097  * @xfer_len:   data transfer length
7098  *
7099  * Return value:
7100  *      none
7101  **/
7102 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7103                                   __be32 res_handle, u8 parm,
7104                                   dma_addr_t dma_addr, u8 xfer_len)
7105 {
7106         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7107
7108         ioarcb->res_handle = res_handle;
7109         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7110         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7111         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7112         ioarcb->cmd_pkt.cdb[1] = parm;
7113         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7114
7115         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7116 }
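
/*
 * For reference (standard SCSI-2 MODE SELECT(6) layout; an assumption,
 * not something this file defines): the CDB built above is
 *
 *      cdb[0]   MODE_SELECT (0x15)
 *      cdb[1]   PF/SP bits (@parm; 0x11 = PF | SP)
 *      cdb[2-3] reserved
 *      cdb[4]   parameter list length (@xfer_len)
 *      cdb[5]   control
 *
 * The 0x11 passed by the page 0x24/0x28 callers therefore requests the
 * standard page format (PF) and asks the IOA to save the pages (SP).
 */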
7117
7118 /**
7119  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7120  * @ipr_cmd:    ipr command struct
7121  *
7122  * This function sets up the SCSI bus attributes and sends
7123  * a Mode Select for Page 28 to activate them.
7124  *
7125  * Return value:
7126  *      IPR_RC_JOB_RETURN
7127  **/
7128 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7129 {
7130         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7131         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7132         int length;
7133
7134         ENTER;
7135         ipr_scsi_bus_speed_limit(ioa_cfg);
7136         ipr_check_term_power(ioa_cfg, mode_pages);
7137         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
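        /*
         * Per the SCSI mode parameter header definition, the mode data
         * length field excludes itself, so the full buffer is
         * hdr.length + 1 bytes; the field itself is reserved on MODE
         * SELECT and is cleared before the data is sent back down.
         */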
7138         length = mode_pages->hdr.length + 1;
7139         mode_pages->hdr.length = 0;
7140
7141         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7142                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7143                               length);
7144
7145         ipr_cmd->job_step = ipr_set_supported_devs;
7146         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7147                                     struct ipr_resource_entry, queue);
7148         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7149
7150         LEAVE;
7151         return IPR_RC_JOB_RETURN;
7152 }
7153
7154 /**
7155  * ipr_build_mode_sense - Builds a mode sense command
7156  * @ipr_cmd:    ipr command struct
7157  * @res_handle: resource handle to send command to
7158  * @parm:               Byte 2 of mode sense command
7159  * @dma_addr:   DMA address of mode sense buffer
7160  * @xfer_len:   Size of DMA buffer
7161  *
7162  * Return value:
7163  *      none
7164  **/
7165 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7166                                  __be32 res_handle,
7167                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7168 {
7169         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7170
7171         ioarcb->res_handle = res_handle;
7172         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7173         ioarcb->cmd_pkt.cdb[2] = parm;
7174         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7175         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7176
7177         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7178 }
7179
7180 /**
7181  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7182  * @ipr_cmd:    ipr command struct
7183  *
7184  * This function handles the failure of an IOA bringup command.
7185  *
7186  * Return value:
7187  *      IPR_RC_JOB_RETURN
7188  **/
7189 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7190 {
7191         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7192         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7193
7194         dev_err(&ioa_cfg->pdev->dev,
7195                 "0x%02X failed with IOASC: 0x%08X\n",
7196                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7197
7198         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7199         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7200         return IPR_RC_JOB_RETURN;
7201 }
7202
7203 /**
7204  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7205  * @ipr_cmd:    ipr command struct
7206  *
7207  * This function handles the failure of a Mode Sense to the IOAFP.
7208  * Some adapters do not handle all mode pages.
7209  *
7210  * Return value:
7211  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7212  **/
7213 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7214 {
7215         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7216         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7217
7218         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7219                 ipr_cmd->job_step = ipr_set_supported_devs;
7220                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7221                                             struct ipr_resource_entry, queue);
7222                 return IPR_RC_JOB_CONTINUE;
7223         }
7224
7225         return ipr_reset_cmd_failed(ipr_cmd);
7226 }
7227
7228 /**
7229  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7230  * @ipr_cmd:    ipr command struct
7231  *
7232  * This function sends a Page 28 mode sense to the IOA to
7233  * retrieve SCSI bus attributes.
7234  *
7235  * Return value:
7236  *      IPR_RC_JOB_RETURN
7237  **/
7238 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7239 {
7240         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7241
7242         ENTER;
7243         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7244                              0x28, ioa_cfg->vpd_cbs_dma +
7245                              offsetof(struct ipr_misc_cbs, mode_pages),
7246                              sizeof(struct ipr_mode_pages));
7247
7248         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7249         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7250
7251         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7252
7253         LEAVE;
7254         return IPR_RC_JOB_RETURN;
7255 }
7256
7257 /**
7258  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7259  * @ipr_cmd:    ipr command struct
7260  *
7261  * This function enables dual IOA RAID support if possible.
7262  *
7263  * Return value:
7264  *      IPR_RC_JOB_RETURN
7265  **/
7266 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7267 {
7268         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7269         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7270         struct ipr_mode_page24 *mode_page;
7271         int length;
7272
7273         ENTER;
7274         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7275                                       sizeof(struct ipr_mode_page24));
7276
7277         if (mode_page)
7278                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7279
7280         length = mode_pages->hdr.length + 1;
7281         mode_pages->hdr.length = 0;
7282
7283         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7284                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7285                               length);
7286
7287         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7288         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7289
7290         LEAVE;
7291         return IPR_RC_JOB_RETURN;
7292 }
7293
7294 /**
7295  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7296  * @ipr_cmd:    ipr command struct
7297  *
7298  * This function handles the failure of a Mode Sense to the IOAFP.
7299  * Some adapters do not handle all mode pages.
7300  *
7301  * Return value:
7302  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7303  **/
7304 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7305 {
7306         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7307
7308         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7309                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7310                 return IPR_RC_JOB_CONTINUE;
7311         }
7312
7313         return ipr_reset_cmd_failed(ipr_cmd);
7314 }
7315
7316 /**
7317  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7318  * @ipr_cmd:    ipr command struct
7319  *
7320  * This function sends a mode sense to the IOA to retrieve
7321  * the IOA Advanced Function Control mode page.
7322  *
7323  * Return value:
7324  *      IPR_RC_JOB_RETURN
7325  **/
7326 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7327 {
7328         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7329
7330         ENTER;
7331         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7332                              0x24, ioa_cfg->vpd_cbs_dma +
7333                              offsetof(struct ipr_misc_cbs, mode_pages),
7334                              sizeof(struct ipr_mode_pages));
7335
7336         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7337         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7338
7339         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7340
7341         LEAVE;
7342         return IPR_RC_JOB_RETURN;
7343 }
7344
7345 /**
7346  * ipr_init_res_table - Initialize the resource table
7347  * @ipr_cmd:    ipr command struct
7348  *
7349  * This function walks the existing resource table, comparing it
7350  * with the config table just read from the adapter. It handles both
7351  * old and new devices, scheduling their addition to or removal from
7352  * the mid-layer as appropriate.
7353  *
7354  * Return value:
7355  *      IPR_RC_JOB_CONTINUE
7356  **/
7357 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7358 {
7359         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7360         struct ipr_resource_entry *res, *temp;
7361         struct ipr_config_table_entry_wrapper cfgtew;
7362         int entries, found, flag, i;
7363         LIST_HEAD(old_res);
7364
7365         ENTER;
7366         if (ioa_cfg->sis64)
7367                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7368         else
7369                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7370
7371         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7372                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7373
7374         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7375                 list_move_tail(&res->queue, &old_res);
7376
7377         if (ioa_cfg->sis64)
7378                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7379         else
7380                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7381
7382         for (i = 0; i < entries; i++) {
7383                 if (ioa_cfg->sis64)
7384                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7385                 else
7386                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7387                 found = 0;
7388
7389                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7390                         if (ipr_is_same_device(res, &cfgtew)) {
7391                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7392                                 found = 1;
7393                                 break;
7394                         }
7395                 }
7396
7397                 if (!found) {
7398                         if (list_empty(&ioa_cfg->free_res_q)) {
7399                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7400                                 break;
7401                         }
7402
7403                         found = 1;
7404                         res = list_entry(ioa_cfg->free_res_q.next,
7405                                          struct ipr_resource_entry, queue);
7406                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7407                         ipr_init_res_entry(res, &cfgtew);
7408                         res->add_to_ml = 1;
7409                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7410                         res->sdev->allow_restart = 1;
7411
7412                 if (found)
7413                         ipr_update_res_entry(res, &cfgtew);
7414         }
7415
7416         list_for_each_entry_safe(res, temp, &old_res, queue) {
7417                 if (res->sdev) {
7418                         res->del_from_ml = 1;
7419                         res->res_handle = IPR_INVALID_RES_HANDLE;
7420                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7421                 }
7422         }
7423
7424         list_for_each_entry_safe(res, temp, &old_res, queue) {
7425                 ipr_clear_res_target(res);
7426                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7427         }
7428
7429         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7430                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7431         else
7432                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7433
7434         LEAVE;
7435         return IPR_RC_JOB_CONTINUE;
7436 }
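
/*
 * In outline, the reconciliation above is a mark-and-sweep over the
 * resource lists (sketch, not driver code):
 *
 *      move everything from used_res_q to a local old_res list;
 *      for each entry in the new config table:
 *              if it matches an entry on old_res:
 *                      move that entry back to used_res_q;
 *              else:
 *                      take a free entry, init it, set add_to_ml;
 *      for each entry left on old_res (no longer reported):
 *              if it has an sdev, set del_from_ml and keep it on
 *              used_res_q until the mid-layer removal completes;
 *              otherwise clear it and return it to free_res_q;
 */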
7437
7438 /**
7439  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7440  * @ipr_cmd:    ipr command struct
7441  *
7442  * This function sends a Query IOA Configuration command
7443  * to the adapter to retrieve the IOA configuration table.
7444  *
7445  * Return value:
7446  *      IPR_RC_JOB_RETURN
7447  **/
7448 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7449 {
7450         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7451         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7452         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7453         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7454
7455         ENTER;
7456         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7457                 ioa_cfg->dual_raid = 1;
7458         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7459                  ucode_vpd->major_release, ucode_vpd->card_type,
7460                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7461         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7462         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7463
7464         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7465         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7466         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7467         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7468
7469         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7470                        IPR_IOADL_FLAGS_READ_LAST);
7471
7472         ipr_cmd->job_step = ipr_init_res_table;
7473
7474         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7475
7476         LEAVE;
7477         return IPR_RC_JOB_RETURN;
7478 }
7479
7480 /**
7481  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7482  * @ipr_cmd:    ipr command struct
7483  *
7484  * This utility function sends an inquiry to the adapter.
7485  *
7486  * Return value:
7487  *      none
7488  **/
7489 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7490                               dma_addr_t dma_addr, u8 xfer_len)
7491 {
7492         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7493
7494         ENTER;
7495         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7496         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7497
7498         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7499         ioarcb->cmd_pkt.cdb[1] = flags;
7500         ioarcb->cmd_pkt.cdb[2] = page;
7501         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7502
7503         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7504
7505         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7506         LEAVE;
7507 }
7508
7509 /**
7510  * ipr_inquiry_page_supported - Is the given inquiry page supported
7511  * @page0:              inquiry page 0 buffer
7512  * @page:               page code.
7513  *
7514  * This function determines if the specified inquiry page is supported.
7515  *
7516  * Return value:
7517  *      1 if page is supported / 0 if not
7518  **/
7519 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7520 {
7521         int i;
7522
7523         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7524                 if (page0->page[i] == page)
7525                         return 1;
7526
7527         return 0;
7528 }
7529
7530 /**
7531  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7532  * @ipr_cmd:    ipr command struct
7533  *
7534  * This function sends a Page 0xD0 inquiry to the adapter
7535  * to retrieve adapter capabilities.
7536  *
7537  * Return value:
7538  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7539  **/
7540 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7541 {
7542         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7543         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7544         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7545
7546         ENTER;
7547         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7548         memset(cap, 0, sizeof(*cap));
7549
7550         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7551                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7552                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7553                                   sizeof(struct ipr_inquiry_cap));
7554                 return IPR_RC_JOB_RETURN;
7555         }
7556
7557         LEAVE;
7558         return IPR_RC_JOB_CONTINUE;
7559 }
7560
7561 /**
7562  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7563  * @ipr_cmd:    ipr command struct
7564  *
7565  * This function sends a Page 3 inquiry to the adapter
7566  * to retrieve software VPD information.
7567  *
7568  * Return value:
7569  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7570  **/
7571 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7572 {
7573         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7574
7575         ENTER;
7576
7577         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7578
7579         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7580                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7581                           sizeof(struct ipr_inquiry_page3));
7582
7583         LEAVE;
7584         return IPR_RC_JOB_RETURN;
7585 }
7586
7587 /**
7588  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7589  * @ipr_cmd:    ipr command struct
7590  *
7591  * This function sends a Page 0 inquiry to the adapter
7592  * to retrieve supported inquiry pages.
7593  *
7594  * Return value:
7595  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7596  **/
7597 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7598 {
7599         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7600         char type[5];
7601
7602         ENTER;
7603
7604         /* Grab the type out of the VPD and store it away */
7605         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7606         type[4] = '\0';
7607         ioa_cfg->type = simple_strtoul(type, NULL, 16);
7608
7609         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7610
7611         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7612                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7613                           sizeof(struct ipr_inquiry_page0));
7614
7615         LEAVE;
7616         return IPR_RC_JOB_RETURN;
7617 }
7618
7619 /**
7620  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7621  * @ipr_cmd:    ipr command struct
7622  *
7623  * This function sends a standard inquiry to the adapter.
7624  *
7625  * Return value:
7626  *      IPR_RC_JOB_RETURN
7627  **/
7628 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7629 {
7630         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7631
7632         ENTER;
7633         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7634
7635         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7636                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7637                           sizeof(struct ipr_ioa_vpd));
7638
7639         LEAVE;
7640         return IPR_RC_JOB_RETURN;
7641 }
7642
7643 /**
7644  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7645  * @ipr_cmd:    ipr command struct
7646  *
7647  * This function sends an Identify Host Request Response Queue
7648  * command to establish the HRRQ with the adapter.
7649  *
7650  * Return value:
7651  *      IPR_RC_JOB_RETURN
7652  **/
7653 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7654 {
7655         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7656         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7657         struct ipr_hrr_queue *hrrq;
7658
7659         ENTER;
7660         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7661         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7662
7663         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7664                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7665
7666                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7667                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7668
7669                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7670                 if (ioa_cfg->sis64)
7671                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7672
7673                 if (ioa_cfg->nvectors == 1)
7674                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7675                 else
7676                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7677
7678                 ioarcb->cmd_pkt.cdb[2] =
7679                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7680                 ioarcb->cmd_pkt.cdb[3] =
7681                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7682                 ioarcb->cmd_pkt.cdb[4] =
7683                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7684                 ioarcb->cmd_pkt.cdb[5] =
7685                         ((u64) hrrq->host_rrq_dma) & 0xff;
7686                 ioarcb->cmd_pkt.cdb[7] =
7687                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7688                 ioarcb->cmd_pkt.cdb[8] =
7689                         (sizeof(u32) * hrrq->size) & 0xff;
7690
7691                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7692                         ioarcb->cmd_pkt.cdb[9] =
7693                                         ioa_cfg->identify_hrrq_index;
7694
7695                 if (ioa_cfg->sis64) {
7696                         ioarcb->cmd_pkt.cdb[10] =
7697                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7698                         ioarcb->cmd_pkt.cdb[11] =
7699                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7700                         ioarcb->cmd_pkt.cdb[12] =
7701                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7702                         ioarcb->cmd_pkt.cdb[13] =
7703                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7704                 }
7705
7706                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7707                         ioarcb->cmd_pkt.cdb[14] =
7708                                         ioa_cfg->identify_hrrq_index;
7709
7710                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7711                            IPR_INTERNAL_TIMEOUT);
7712
7713                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7714                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7715
7716                 LEAVE;
7717                 return IPR_RC_JOB_RETURN;
7718         }
7719
7720         LEAVE;
7721         return IPR_RC_JOB_CONTINUE;
7722 }
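
/*
 * Illustrative sketch: the Identify HRRQ CDB above scatters the 64-bit
 * host RRQ DMA address big-endian across two byte ranges, equivalent to:
 *
 *      u64 a = (u64) hrrq->host_rrq_dma;
 *
 *      cdb[2..5]   = a >> 24, a >> 16, a >> 8, a;        low 32 bits
 *      cdb[10..13] = a >> 56, a >> 48, a >> 40, a >> 32; high 32 bits (sis64)
 *      cdb[7..8]   = 16-bit big-endian queue size in bytes
 *                    (sizeof(u32) per RRQ entry)
 *
 * On 32-bit SIS adapters only the low word is sent, presumably because
 * those chips use 32-bit DMA addressing.
 */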
7723
7724 /**
7725  * ipr_reset_timer_done - Adapter reset timer function
7726  * @ipr_cmd:    ipr command struct
7727  *
7728  * Description: This function is used in adapter reset processing
7729  * for timing events. If the reset_cmd pointer in the IOA
7730  * config struct does not point to this command, we are doing nested
7731  * resets and fail_all_ops will take care of freeing the
7732  * command block.
7733  *
7734  * Return value:
7735  *      none
7736  **/
7737 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7738 {
7739         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7740         unsigned long lock_flags = 0;
7741
7742         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7743
7744         if (ioa_cfg->reset_cmd == ipr_cmd) {
7745                 list_del(&ipr_cmd->queue);
7746                 ipr_cmd->done(ipr_cmd);
7747         }
7748
7749         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7750 }
7751
7752 /**
7753  * ipr_reset_start_timer - Start a timer for adapter reset job
7754  * @ipr_cmd:    ipr command struct
7755  * @timeout:    timeout value
7756  *
7757  * Description: This function is used in adapter reset processing
7758  * for timing events. If the reset_cmd pointer in the IOA
7759  * config struct does not point to this command, we are doing nested
7760  * resets and fail_all_ops will take care of freeing the
7761  * command block.
7762  *
7763  * Return value:
7764  *      none
7765  **/
7766 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7767                                   unsigned long timeout)
7768 {
7769
7770         ENTER;
7771         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7772         ipr_cmd->done = ipr_reset_ioa_job;
7773
7774         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7775         ipr_cmd->timer.expires = jiffies + timeout;
7776         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7777         add_timer(&ipr_cmd->timer);
7778 }
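
/*
 * Note: some callers (e.g. the unit check paths below) pass a timeout
 * of 0, apparently just to bounce the job through timer context; either
 * way the command sits on hrrq_pending_q until the timer fires and
 * ipr_reset_timer_done() invokes ->done (ipr_reset_ioa_job).
 */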
7779
7780 /**
7781  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7782  * @ioa_cfg:    ioa cfg struct
7783  *
7784  * Return value:
7785  *      nothing
7786  **/
7787 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7788 {
7789         struct ipr_hrr_queue *hrrq;
7790
7791         for_each_hrrq(hrrq, ioa_cfg) {
7792                 spin_lock(&hrrq->_lock);
7793                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7794
7795                 /* Initialize Host RRQ pointers */
7796                 hrrq->hrrq_start = hrrq->host_rrq;
7797                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7798                 hrrq->hrrq_curr = hrrq->hrrq_start;
7799                 hrrq->toggle_bit = 1;
7800                 spin_unlock(&hrrq->_lock);
7801         }
7802         wmb();
7803
7804         ioa_cfg->identify_hrrq_index = 0;
7805         if (ioa_cfg->hrrq_num == 1)
7806                 atomic_set(&ioa_cfg->hrrq_index, 0);
7807         else
7808                 atomic_set(&ioa_cfg->hrrq_index, 1);
7809
7810         /* Zero out config table */
7811         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7812 }
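
/*
 * Background sketch on the toggle bit (the actual consumer lives in the
 * interrupt path elsewhere in this file): each full pass of the adapter
 * through the circular RRQ flips the toggle bit it writes, so the host
 * can distinguish fresh entries from stale ones without zeroing the
 * queue. Roughly:
 *
 *      while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT)
 *                      == hrrq->toggle_bit) {
 *              ... process the response ...
 *              if (hrrq->hrrq_curr++ == hrrq->hrrq_end) {
 *                      hrrq->hrrq_curr = hrrq->hrrq_start;  wrap
 *                      hrrq->toggle_bit ^= 1u;
 *              }
 *      }
 *
 * Starting toggle_bit at 1 here matches a freshly zeroed queue, where
 * no entry yet carries the toggle bit set.
 */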
7813
7814 /**
7815  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7816  * @ipr_cmd:    ipr command struct
7817  *
7818  * Return value:
7819  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7820  **/
7821 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7822 {
7823         unsigned long stage, stage_time;
7824         u32 feedback;
7825         volatile u32 int_reg;
7826         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7827         u64 maskval = 0;
7828
7829         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7830         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7831         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7832
7833         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7834
7835         /* sanity check the stage_time value */
7836         if (stage_time == 0)
7837                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7838         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7839                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7840         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7841                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7842
7843         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7844                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7845                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7846                 stage_time = ioa_cfg->transop_timeout;
7847                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7848         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7849                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7850                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7851                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7852                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7853                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7854                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7855                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7856                         return IPR_RC_JOB_CONTINUE;
7857                 }
7858         }
7859
7860         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7861         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7862         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7863         ipr_cmd->done = ipr_reset_ioa_job;
7864         add_timer(&ipr_cmd->timer);
7865
7866         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7867
7868         return IPR_RC_JOB_RETURN;
7869 }
7870
7871 /**
7872  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7873  * @ipr_cmd:    ipr command struct
7874  *
7875  * This function reinitializes some control blocks and
7876  * enables destructive diagnostics on the adapter.
7877  *
7878  * Return value:
7879  *      IPR_RC_JOB_RETURN
7880  **/
7881 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7882 {
7883         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7884         volatile u32 int_reg;
7885         volatile u64 maskval;
7886         int i;
7887
7888         ENTER;
7889         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7890         ipr_init_ioa_mem(ioa_cfg);
7891
7892         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7893                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7894                 ioa_cfg->hrrq[i].allow_interrupts = 1;
7895                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7896         }
7897         wmb();
7898         if (ioa_cfg->sis64) {
7899                 /* Set the adapter to the correct endian mode. */
7900                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7901                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7902         }
7903
7904         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7905
7906         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7907                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7908                        ioa_cfg->regs.clr_interrupt_mask_reg32);
7909                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7910                 return IPR_RC_JOB_CONTINUE;
7911         }
7912
7913         /* Enable destructive diagnostics on IOA */
7914         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7915
7916         if (ioa_cfg->sis64) {
7917                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7918                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7919                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7920         } else
7921                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7922
7923         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7924
7925         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7926
7927         if (ioa_cfg->sis64) {
7928                 ipr_cmd->job_step = ipr_reset_next_stage;
7929                 return IPR_RC_JOB_CONTINUE;
7930         }
7931
7932         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7933         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7934         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7935         ipr_cmd->done = ipr_reset_ioa_job;
7936         add_timer(&ipr_cmd->timer);
7937         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7938
7939         LEAVE;
7940         return IPR_RC_JOB_RETURN;
7941 }
7942
7943 /**
7944  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7945  * @ipr_cmd:    ipr command struct
7946  *
7947  * This function is invoked when an adapter dump has run out
7948  * of processing time.
7949  *
7950  * Return value:
7951  *      IPR_RC_JOB_CONTINUE
7952  **/
7953 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7954 {
7955         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7956
7957         if (ioa_cfg->sdt_state == GET_DUMP)
7958                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7959         else if (ioa_cfg->sdt_state == READ_DUMP)
7960                 ioa_cfg->sdt_state = ABORT_DUMP;
7961
7962         ioa_cfg->dump_timeout = 1;
7963         ipr_cmd->job_step = ipr_reset_alert;
7964
7965         return IPR_RC_JOB_CONTINUE;
7966 }
7967
7968 /**
7969  * ipr_unit_check_no_data - Log a unit check/no data error log
7970  * @ioa_cfg:            ioa config struct
7971  *
7972  * Logs an error indicating the adapter unit checked, but for some
7973  * reason, we were unable to fetch the unit check buffer.
7974  *
7975  * Return value:
7976  *      nothing
7977  **/
7978 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7979 {
7980         ioa_cfg->errors_logged++;
7981         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7982 }
7983
7984 /**
7985  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7986  * @ioa_cfg:            ioa config struct
7987  *
7988  * Fetches the unit check buffer from the adapter by clocking the data
7989  * through the mailbox register.
7990  *
7991  * Return value:
7992  *      nothing
7993  **/
7994 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7995 {
7996         unsigned long mailbox;
7997         struct ipr_hostrcb *hostrcb;
7998         struct ipr_uc_sdt sdt;
7999         int rc, length;
8000         u32 ioasc;
8001
8002         mailbox = readl(ioa_cfg->ioa_mailbox);
8003
8004         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8005                 ipr_unit_check_no_data(ioa_cfg);
8006                 return;
8007         }
8008
8009         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8010         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8011                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8012
8013         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8014             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8015             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8016                 ipr_unit_check_no_data(ioa_cfg);
8017                 return;
8018         }
8019
8020         /* Find length of the first sdt entry (UC buffer) */
8021         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8022                 length = be32_to_cpu(sdt.entry[0].end_token);
8023         else
8024                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8025                           be32_to_cpu(sdt.entry[0].start_token)) &
8026                           IPR_FMT2_MBX_ADDR_MASK;
8027
8028         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8029                              struct ipr_hostrcb, queue);
8030         list_del(&hostrcb->queue);
8031         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8032
8033         rc = ipr_get_ldump_data_section(ioa_cfg,
8034                                         be32_to_cpu(sdt.entry[0].start_token),
8035                                         (__be32 *)&hostrcb->hcam,
8036                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8037
8038         if (!rc) {
8039                 ipr_handle_log_data(ioa_cfg, hostrcb);
8040                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8041                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8042                     ioa_cfg->sdt_state == GET_DUMP)
8043                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8044         } else
8045                 ipr_unit_check_no_data(ioa_cfg);
8046
8047         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8048 }
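
/*
 * The two length computations above reflect the two SDT layouts: FMT3
 * entries carry the byte length directly in end_token, while FMT2
 * entries carry start/end bus addresses, making the length their
 * difference masked to the mailbox address width. With hypothetical
 * values:
 *
 *      start_token = 0x00FE1000, end_token = 0x00FE1400
 *      length = (0x00FE1400 - 0x00FE1000) & IPR_FMT2_MBX_ADDR_MASK
 *             = 0x400 bytes
 */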
8049
8050 /**
8051  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8052  * @ipr_cmd:    ipr command struct
8053  *
8054  * Description: This function fetches the unit check buffer from the adapter.
8055  *
8056  * Return value:
8057  *      IPR_RC_JOB_RETURN
8058  **/
8059 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8060 {
8061         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8062
8063         ENTER;
8064         ioa_cfg->ioa_unit_checked = 0;
8065         ipr_get_unit_check_buffer(ioa_cfg);
8066         ipr_cmd->job_step = ipr_reset_alert;
8067         ipr_reset_start_timer(ipr_cmd, 0);
8068
8069         LEAVE;
8070         return IPR_RC_JOB_RETURN;
8071 }
8072
8073 /**
8074  * ipr_reset_restore_cfg_space - Restore PCI config space.
8075  * @ipr_cmd:    ipr command struct
8076  *
8077  * Description: This function restores the saved PCI config space of
8078  * the adapter, fails all outstanding ops back to the callers, and
8079  * fetches the dump/unit check if applicable to this reset.
8080  *
8081  * Return value:
8082  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8083  **/
8084 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8085 {
8086         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8087         u32 int_reg;
8088
8089         ENTER;
8090         ioa_cfg->pdev->state_saved = true;
8091         pci_restore_state(ioa_cfg->pdev);
8092
8093         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8094                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8095                 return IPR_RC_JOB_CONTINUE;
8096         }
8097
8098         ipr_fail_all_ops(ioa_cfg);
8099
8100         if (ioa_cfg->sis64) {
8101                 /* Set the adapter to the correct endian mode. */
8102                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8103                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8104         }
8105
8106         if (ioa_cfg->ioa_unit_checked) {
8107                 if (ioa_cfg->sis64) {
8108                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8109                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8110                         return IPR_RC_JOB_RETURN;
8111                 } else {
8112                         ioa_cfg->ioa_unit_checked = 0;
8113                         ipr_get_unit_check_buffer(ioa_cfg);
8114                         ipr_cmd->job_step = ipr_reset_alert;
8115                         ipr_reset_start_timer(ipr_cmd, 0);
8116                         return IPR_RC_JOB_RETURN;
8117                 }
8118         }
8119
8120         if (ioa_cfg->in_ioa_bringdown) {
8121                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8122         } else {
8123                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8124
8125                 if (GET_DUMP == ioa_cfg->sdt_state) {
8126                         ioa_cfg->sdt_state = READ_DUMP;
8127                         ioa_cfg->dump_timeout = 0;
8128                         if (ioa_cfg->sis64)
8129                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8130                         else
8131                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8132                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8133                         schedule_work(&ioa_cfg->work_q);
8134                         return IPR_RC_JOB_RETURN;
8135                 }
8136         }
8137
8138         LEAVE;
8139         return IPR_RC_JOB_CONTINUE;
8140 }
8141
8142 /**
8143  * ipr_reset_bist_done - BIST has completed on the adapter.
8144  * @ipr_cmd:    ipr command struct
8145  *
8146  * Description: Unblock config space and resume the reset process.
8147  *
8148  * Return value:
8149  *      IPR_RC_JOB_CONTINUE
8150  **/
8151 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8152 {
8153         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8154
8155         ENTER;
8156         if (ioa_cfg->cfg_locked)
8157                 pci_cfg_access_unlock(ioa_cfg->pdev);
8158         ioa_cfg->cfg_locked = 0;
8159         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8160         LEAVE;
8161         return IPR_RC_JOB_CONTINUE;
8162 }
8163
8164 /**
8165  * ipr_reset_start_bist - Run BIST on the adapter.
8166  * @ipr_cmd:    ipr command struct
8167  *
8168  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8169  *
8170  * Return value:
8171  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8172  **/
8173 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8174 {
8175         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8176         int rc = PCIBIOS_SUCCESSFUL;
8177
8178         ENTER;
8179         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8180                 writel(IPR_UPROCI_SIS64_START_BIST,
8181                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8182         else
8183                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8184
8185         if (rc == PCIBIOS_SUCCESSFUL) {
8186                 ipr_cmd->job_step = ipr_reset_bist_done;
8187                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8188                 rc = IPR_RC_JOB_RETURN;
8189         } else {
8190                 if (ioa_cfg->cfg_locked)
8191                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8192                 ioa_cfg->cfg_locked = 0;
8193                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8194                 rc = IPR_RC_JOB_CONTINUE;
8195         }
8196
8197         LEAVE;
8198         return rc;
8199 }
8200
8201 /**
8202  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8203  * @ipr_cmd:    ipr command struct
8204  *
8205  * Description: This clears PCI reset to the adapter and delays two seconds.
8206  *
8207  * Return value:
8208  *      IPR_RC_JOB_RETURN
8209  **/
8210 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8211 {
8212         ENTER;
8213         pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8214         ipr_cmd->job_step = ipr_reset_bist_done;
8215         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8216         LEAVE;
8217         return IPR_RC_JOB_RETURN;
8218 }
8219
8220 /**
8221  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8222  * @ipr_cmd:    ipr command struct
8223  *
8224  * Description: This asserts PCI reset to the adapter.
8225  *
8226  * Return value:
8227  *      IPR_RC_JOB_RETURN
8228  **/
8229 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8230 {
8231         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8232         struct pci_dev *pdev = ioa_cfg->pdev;
8233
8234         ENTER;
8235         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8236         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8237         ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8238         LEAVE;
8239         return IPR_RC_JOB_RETURN;
8240 }
8241
8242 /**
8243  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8244  * @ipr_cmd:    ipr command struct
8245  *
8246  * Description: This attempts to block config access to the IOA.
8247  *
8248  * Return value:
8249  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8250  **/
8251 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8252 {
8253         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8254         int rc = IPR_RC_JOB_CONTINUE;
8255
8256         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8257                 ioa_cfg->cfg_locked = 1;
8258                 ipr_cmd->job_step = ioa_cfg->reset;
8259         } else {
8260                 if (ipr_cmd->u.time_left) {
8261                         rc = IPR_RC_JOB_RETURN;
8262                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8263                         ipr_reset_start_timer(ipr_cmd,
8264                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8265                 } else {
8266                         ipr_cmd->job_step = ioa_cfg->reset;
8267                         dev_err(&ioa_cfg->pdev->dev,
8268                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8269                 }
8270         }
8271
8272         return rc;
8273 }
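
/*
 * Retry budget sketch: u.time_left starts at IPR_WAIT_FOR_RESET_TIMEOUT
 * and is burned down in IPR_CHECK_FOR_RESET_TIMEOUT slices, so with the
 * usual 2 second / 100 ms definitions (see ipr.h) the trylock is
 * attempted roughly 20 times before the driver gives up and resets
 * anyway.
 */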
8274
8275 /**
8276  * ipr_reset_block_config_access - Block config access to the IOA
8277  * @ipr_cmd:    ipr command struct
8278  *
8279  * Description: This attempts to block config access to the IOA
8280  *
8281  * Return value:
8282  *      IPR_RC_JOB_CONTINUE
8283  **/
8284 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8285 {
8286         ipr_cmd->ioa_cfg->cfg_locked = 0;
8287         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8288         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8289         return IPR_RC_JOB_CONTINUE;
8290 }
8291
8292 /**
8293  * ipr_reset_allowed - Query whether or not IOA can be reset
8294  * @ioa_cfg:    ioa config struct
8295  *
8296  * Return value:
8297  *      0 if reset not allowed / non-zero if reset is allowed
8298  **/
8299 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8300 {
8301         volatile u32 temp_reg;
8302
8303         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8304         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8305 }
8306
8307 /**
8308  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8309  * @ipr_cmd:    ipr command struct
8310  *
8311  * Description: This function waits for adapter permission to run BIST,
8312  * then runs BIST. If the adapter does not give permission after a
8313  * reasonable time, we will reset the adapter anyway. The impact of
8314  * resetting the adapter without warning the adapter is the risk of
8315  * losing the persistent error log on the adapter. If the adapter is
8316  * reset while it is writing to the flash on the adapter, the flash
8317  * segment will have bad ECC and be zeroed.
8318  *
8319  * Return value:
8320  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8321  **/
8322 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8323 {
8324         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8325         int rc = IPR_RC_JOB_RETURN;
8326
8327         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8328                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8329                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8330         } else {
8331                 ipr_cmd->job_step = ipr_reset_block_config_access;
8332                 rc = IPR_RC_JOB_CONTINUE;
8333         }
8334
8335         return rc;
8336 }
8337
8338 /**
8339  * ipr_reset_alert - Alert the adapter of a pending reset
8340  * @ipr_cmd:    ipr command struct
8341  *
8342  * Description: This function alerts the adapter that it will be reset.
8343  * If memory space is not currently enabled, proceed directly
8344  * to running BIST on the adapter. The timer must always be started
8345  * so we guarantee we do not run BIST from ipr_isr.
8346  *
8347  * Return value:
8348  *      IPR_RC_JOB_RETURN
8349  **/
8350 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8351 {
8352         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8353         u16 cmd_reg;
8354         int rc;
8355
8356         ENTER;
8357         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8358
8359         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8360                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8361                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8362                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8363         } else {
8364                 ipr_cmd->job_step = ipr_reset_block_config_access;
8365         }
8366
8367         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8368         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8369
8370         LEAVE;
8371         return IPR_RC_JOB_RETURN;
8372 }
8373
8374 /**
8375  * ipr_reset_ucode_download_done - Microcode download completion
8376  * @ipr_cmd:    ipr command struct
8377  *
8378  * Description: This function unmaps the microcode download buffer.
8379  *
8380  * Return value:
8381  *      IPR_RC_JOB_CONTINUE
8382  **/
8383 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8384 {
8385         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8386         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8387
8388         pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8389                      sglist->num_sg, DMA_TO_DEVICE);
8390
8391         ipr_cmd->job_step = ipr_reset_alert;
8392         return IPR_RC_JOB_CONTINUE;
8393 }
8394
8395 /**
8396  * ipr_reset_ucode_download - Download microcode to the adapter
8397  * @ipr_cmd:    ipr command struct
8398  *
8399  * Description: This function checks to see if there is microcode
8400  * to download to the adapter. If there is, a download is performed.
8401  *
8402  * Return value:
8403  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8404  **/
8405 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8406 {
8407         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8408         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8409
8410         ENTER;
8411         ipr_cmd->job_step = ipr_reset_alert;
8412
8413         if (!sglist)
8414                 return IPR_RC_JOB_CONTINUE;
8415
8416         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8417         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8418         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8419         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8420         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8421         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8422         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8423
8424         if (ioa_cfg->sis64)
8425                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8426         else
8427                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8428         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8429
8430         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8431                    IPR_WRITE_BUFFER_TIMEOUT);
8432
8433         LEAVE;
8434         return IPR_RC_JOB_RETURN;
8435 }
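
/*
 * For reference (standard SCSI WRITE BUFFER layout; an assumption, not
 * defined here): cdb[1] selects the buffer mode
 * (IPR_WR_BUF_DOWNLOAD_AND_SAVE asks the adapter to both download and
 * save the microcode), and cdb[6..8] carry the 24-bit big-endian
 * parameter list length, which is why buffer_len is split out a byte
 * at a time above.
 */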
8436
8437 /**
8438  * ipr_reset_shutdown_ioa - Shutdown the adapter
8439  * @ipr_cmd:    ipr command struct
8440  *
8441  * Description: This function issues an adapter shutdown of the
8442  * specified type as part of the adapter reset job.
8444  *
8445  * Return value:
8446  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8447  **/
8448 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8449 {
8450         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8451         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8452         unsigned long timeout;
8453         int rc = IPR_RC_JOB_CONTINUE;
8454
8455         ENTER;
8456         if (shutdown_type != IPR_SHUTDOWN_NONE &&
8457                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8458                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8459                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8460                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8461                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8462
8463                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8464                         timeout = IPR_SHUTDOWN_TIMEOUT;
8465                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8466                         timeout = IPR_INTERNAL_TIMEOUT;
8467                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8468                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8469                 else
8470                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8471
8472                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8473
8474                 rc = IPR_RC_JOB_RETURN;
8475                 ipr_cmd->job_step = ipr_reset_ucode_download;
8476         } else
8477                 ipr_cmd->job_step = ipr_reset_alert;
8478
8479         LEAVE;
8480         return rc;
8481 }
8482
8483 /**
8484  * ipr_reset_ioa_job - Adapter reset job
8485  * @ipr_cmd:    ipr command struct
8486  *
8487  * Description: This function is the job router for the adapter reset job.
8488  *
8489  * Return value:
8490  *      none
8491  **/
8492 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8493 {
8494         u32 rc, ioasc;
8495         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8496
8497         do {
8498                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8499
8500                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8501                         /*
8502                          * We are doing nested adapter resets and this is
8503                          * not the current reset job.
8504                          */
8505                         list_add_tail(&ipr_cmd->queue,
8506                                         &ipr_cmd->hrrq->hrrq_free_q);
8507                         return;
8508                 }
8509
8510                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8511                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8512                         if (rc == IPR_RC_JOB_RETURN)
8513                                 return;
8514                 }
8515
8516                 ipr_reinit_ipr_cmnd(ipr_cmd);
8517                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8518                 rc = ipr_cmd->job_step(ipr_cmd);
8519         } while (rc == IPR_RC_JOB_CONTINUE);
8520 }
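
/*
 * The reset job is a small state machine: each job_step either finishes
 * synchronously and returns IPR_RC_JOB_CONTINUE, in which case the loop
 * above immediately runs the step it stored in ipr_cmd->job_step, or it
 * issues an asynchronous request and returns IPR_RC_JOB_RETURN, in which
 * case ipr_reset_ioa_job() is re-entered from the request's completion.
 * A minimal sketch of a conforming step (example_step and next_step are
 * illustrative names, not driver symbols):
 *
 *	static int example_step(struct ipr_cmnd *ipr_cmd)
 *	{
 *		ipr_cmd->job_step = next_step;
 *		ipr_do_req(ipr_cmd, ipr_reset_ioa_job,
 *			   ipr_timeout, IPR_INTERNAL_TIMEOUT);
 *		return IPR_RC_JOB_RETURN;
 *	}
 */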
8521
8522 /**
8523  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8524  * @ioa_cfg:            ioa config struct
8525  * @job_step:           first job step of reset job
8526  * @shutdown_type:      shutdown type
8527  *
8528  * Description: This function will initiate the reset of the given adapter
8529  * starting at the selected job step.
8530  * If the caller needs to wait on the completion of the reset,
8531  * the caller must sleep on the reset_wait_q.
8532  *
8533  * Return value:
8534  *      none
8535  **/
8536 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8537                                     int (*job_step) (struct ipr_cmnd *),
8538                                     enum ipr_shutdown_type shutdown_type)
8539 {
8540         struct ipr_cmnd *ipr_cmd;
8541         int i;
8542
8543         ioa_cfg->in_reset_reload = 1;
8544         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8545                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8546                 ioa_cfg->hrrq[i].allow_cmds = 0;
8547                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8548         }
8549         wmb();
8550         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8551                 scsi_block_requests(ioa_cfg->host);
8552
8553         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8554         ioa_cfg->reset_cmd = ipr_cmd;
8555         ipr_cmd->job_step = job_step;
8556         ipr_cmd->u.shutdown_type = shutdown_type;
8557
8558         ipr_reset_ioa_job(ipr_cmd);
8559 }
8560
8561 /**
8562  * ipr_initiate_ioa_reset - Initiate an adapter reset
8563  * @ioa_cfg:            ioa config struct
8564  * @shutdown_type:      shutdown type
8565  *
8566  * Description: This function will initiate the reset of the given adapter.
8567  * If the caller needs to wait on the completion of the reset,
8568  * the caller must sleep on the reset_wait_q.
8569  *
8570  * Return value:
8571  *      none
8572  **/
8573 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8574                                    enum ipr_shutdown_type shutdown_type)
8575 {
8576         int i;
8577
8578         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8579                 return;
8580
8581         if (ioa_cfg->in_reset_reload) {
8582                 if (ioa_cfg->sdt_state == GET_DUMP)
8583                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8584                 else if (ioa_cfg->sdt_state == READ_DUMP)
8585                         ioa_cfg->sdt_state = ABORT_DUMP;
8586         }
8587
8588         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8589                 dev_err(&ioa_cfg->pdev->dev,
8590                         "IOA taken offline - error recovery failed\n");
8591
8592                 ioa_cfg->reset_retries = 0;
8593                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8594                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8595                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8596                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8597                 }
8598                 wmb();
8599
8600                 if (ioa_cfg->in_ioa_bringdown) {
8601                         ioa_cfg->reset_cmd = NULL;
8602                         ioa_cfg->in_reset_reload = 0;
8603                         ipr_fail_all_ops(ioa_cfg);
8604                         wake_up_all(&ioa_cfg->reset_wait_q);
8605
8606                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8607                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8608                                 scsi_unblock_requests(ioa_cfg->host);
8609                                 spin_lock_irq(ioa_cfg->host->host_lock);
8610                         }
8611                         return;
8612                 } else {
8613                         ioa_cfg->in_ioa_bringdown = 1;
8614                         shutdown_type = IPR_SHUTDOWN_NONE;
8615                 }
8616         }
8617
8618         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8619                                 shutdown_type);
8620 }
8621
8622 /**
8623  * ipr_reset_freeze - Hold off all I/O activity
8624  * @ipr_cmd:    ipr command struct
8625  *
8626  * Description: If the PCI slot is frozen, hold off all I/O
8627  * activity; then, as soon as the slot is available again,
8628  * initiate an adapter reset.
8629  */
8630 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8631 {
8632         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8633         int i;
8634
8635         /* Disallow new interrupts, avoid loop */
8636         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8637                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8638                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8639                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8640         }
8641         wmb();
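        /*
         * Park the reset command on the pending queue with its done handler
         * pointing back at the reset job; the job resumes when the command
         * is completed after the slot becomes usable again.
         */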
8642         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8643         ipr_cmd->done = ipr_reset_ioa_job;
8644         return IPR_RC_JOB_RETURN;
8645 }
8646
8647 /**
8648  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8649  * @pdev:       PCI device struct
8650  *
8651  * Description: This routine is called to tell us that the MMIO
8652  * access to the IOA has been restored
8653  */
8654 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8655 {
8656         unsigned long flags = 0;
8657         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8658
8659         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
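        /*
         * If the error surfaced before probe completed, the probe thread is
         * waiting in ipr_wait_for_pci_err_recovery() and will drive recovery
         * itself; just re-save config space so its pci_restore_state() sees
         * valid data.
         */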
8660         if (!ioa_cfg->probe_done)
8661                 pci_save_state(pdev);
8662         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8663         return PCI_ERS_RESULT_NEED_RESET;
8664 }
8665
8666 /**
8667  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8668  * @pdev:       PCI device struct
8669  *
8670  * Description: This routine is called to tell us that the PCI bus
8671  * is down. Can't do anything here, except put the device driver
8672  * into a holding pattern, waiting for the PCI bus to come back.
8673  */
8674 static void ipr_pci_frozen(struct pci_dev *pdev)
8675 {
8676         unsigned long flags = 0;
8677         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8678
8679         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8680         if (ioa_cfg->probe_done)
8681                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8682         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8683 }
8684
8685 /**
8686  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8687  * @pdev:       PCI device struct
8688  *
8689  * Description: This routine is called by the pci error recovery
8690  * code after the PCI slot has been reset, just before we
8691  * should resume normal operations.
8692  */
8693 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8694 {
8695         unsigned long flags = 0;
8696         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8697
8698         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8699         if (ioa_cfg->probe_done) {
8700                 if (ioa_cfg->needs_warm_reset)
8701                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8702                 else
8703                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8704                                                 IPR_SHUTDOWN_NONE);
8705         } else
8706                 wake_up_all(&ioa_cfg->eeh_wait_q);
8707         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8708         return PCI_ERS_RESULT_RECOVERED;
8709 }
8710
8711 /**
8712  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8713  * @pdev:       PCI device struct
8714  *
8715  * Description: This routine is called when the PCI bus has
8716  * permanently failed.
8717  */
8718 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8719 {
8720         unsigned long flags = 0;
8721         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8722         int i;
8723
8724         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8725         if (ioa_cfg->probe_done) {
8726                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8727                         ioa_cfg->sdt_state = ABORT_DUMP;
8728                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8729                 ioa_cfg->in_ioa_bringdown = 1;
8730                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8731                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8732                         ioa_cfg->hrrq[i].allow_cmds = 0;
8733                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8734                 }
8735                 wmb();
8736                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8737         } else
8738                 wake_up_all(&ioa_cfg->eeh_wait_q);
8739         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8740 }
8741
8742 /**
8743  * ipr_pci_error_detected - Called when a PCI error is detected.
8744  * @pdev:       PCI device struct
8745  * @state:      PCI channel state
8746  *
8747  * Description: Called when a PCI error is detected.
8748  *
8749  * Return value:
8750  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8751  */
8752 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8753                                                pci_channel_state_t state)
8754 {
8755         switch (state) {
8756         case pci_channel_io_frozen:
8757                 ipr_pci_frozen(pdev);
8758                 return PCI_ERS_RESULT_CAN_RECOVER;
8759         case pci_channel_io_perm_failure:
8760                 ipr_pci_perm_failure(pdev);
8761                 return PCI_ERS_RESULT_DISCONNECT;
8763         default:
8764                 break;
8765         }
8766         return PCI_ERS_RESULT_NEED_RESET;
8767 }
8768
8769 /**
8770  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8771  * @ioa_cfg:    ioa cfg struct
8772  *
8773  * Description: This is the second phase of adapter initialization.
8774  * This function takes care of initializing the adapter to the point
8775  * where it can accept new commands.
8776  *
8777  * Return value:
8778  *      0 on success / -EIO on failure
8779  **/
8780 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8781 {
8782         int rc = 0;
8783         unsigned long host_lock_flags = 0;
8784
8785         ENTER;
8786         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8787         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
8788         ioa_cfg->probe_done = 1;
8789         if (ioa_cfg->needs_hard_reset) {
8790                 ioa_cfg->needs_hard_reset = 0;
8791                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8792         } else
8793                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8794                                         IPR_SHUTDOWN_NONE);
8795         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8796         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8797         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8798
8799         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8800                 rc = -EIO;
8801         } else if (ipr_invalid_adapter(ioa_cfg)) {
8802                 if (!ipr_testmode)
8803                         rc = -EIO;
8804
8805                 dev_err(&ioa_cfg->pdev->dev,
8806                         "Adapter not supported in this hardware configuration.\n");
8807         }
8808
8809         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8810
8811         LEAVE;
8812         return rc;
8813 }
8814
8815 /**
8816  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8817  * @ioa_cfg:    ioa config struct
8818  *
8819  * Return value:
8820  *      none
8821  **/
8822 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8823 {
8824         int i;
8825
8826         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8827                 if (ioa_cfg->ipr_cmnd_list[i])
8828                         pci_pool_free(ioa_cfg->ipr_cmd_pool,
8829                                       ioa_cfg->ipr_cmnd_list[i],
8830                                       ioa_cfg->ipr_cmnd_list_dma[i]);
8831
8832                 ioa_cfg->ipr_cmnd_list[i] = NULL;
8833         }
8834
8835         if (ioa_cfg->ipr_cmd_pool)
8836                 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8837
8838         kfree(ioa_cfg->ipr_cmnd_list);
8839         kfree(ioa_cfg->ipr_cmnd_list_dma);
8840         ioa_cfg->ipr_cmnd_list = NULL;
8841         ioa_cfg->ipr_cmnd_list_dma = NULL;
8842         ioa_cfg->ipr_cmd_pool = NULL;
8843 }
8844
8845 /**
8846  * ipr_free_mem - Frees memory allocated for an adapter
8847  * @ioa_cfg:    ioa cfg struct
8848  *
8849  * Return value:
8850  *      nothing
8851  **/
8852 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8853 {
8854         int i;
8855
8856         kfree(ioa_cfg->res_entries);
8857         pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8858                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8859         ipr_free_cmd_blks(ioa_cfg);
8860
8861         for (i = 0; i < ioa_cfg->hrrq_num; i++)
8862                 pci_free_consistent(ioa_cfg->pdev,
8863                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
8864                                         ioa_cfg->hrrq[i].host_rrq,
8865                                         ioa_cfg->hrrq[i].host_rrq_dma);
8866
8867         pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8868                             ioa_cfg->u.cfg_table,
8869                             ioa_cfg->cfg_table_dma);
8870
8871         for (i = 0; i < IPR_NUM_HCAMS; i++) {
8872                 pci_free_consistent(ioa_cfg->pdev,
8873                                     sizeof(struct ipr_hostrcb),
8874                                     ioa_cfg->hostrcb[i],
8875                                     ioa_cfg->hostrcb_dma[i]);
8876         }
8877
8878         ipr_free_dump(ioa_cfg);
8879         kfree(ioa_cfg->trace);
8880 }
8881
8882 /**
8883  * ipr_free_all_resources - Free all allocated resources for an adapter.
8884  * @ioa_cfg:    ioa config struct
8885  *
8886  * This function frees all allocated resources for the
8887  * specified adapter.
8888  *
8889  * Return value:
8890  *      none
8891  **/
8892 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8893 {
8894         struct pci_dev *pdev = ioa_cfg->pdev;
8895
8896         ENTER;
8897         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
8898             ioa_cfg->intr_flag == IPR_USE_MSIX) {
8899                 int i;
8900                 for (i = 0; i < ioa_cfg->nvectors; i++)
8901                         free_irq(ioa_cfg->vectors_info[i].vec,
8902                                 &ioa_cfg->hrrq[i]);
8903         } else
8904                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
8905
8906         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
8907                 pci_disable_msi(pdev);
8908                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
8909         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
8910                 pci_disable_msix(pdev);
8911                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
8912         }
8913
8914         iounmap(ioa_cfg->hdw_dma_regs);
8915         pci_release_regions(pdev);
8916         ipr_free_mem(ioa_cfg);
8917         scsi_host_put(ioa_cfg->host);
8918         pci_disable_device(pdev);
8919         LEAVE;
8920 }
8921
8922 /**
8923  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8924  * @ioa_cfg:    ioa config struct
8925  *
8926  * Return value:
8927  *      0 on success / -ENOMEM on allocation failure
8928  **/
8929 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8930 {
8931         struct ipr_cmnd *ipr_cmd;
8932         struct ipr_ioarcb *ioarcb;
8933         dma_addr_t dma_addr;
8934         int i, entries_each_hrrq, hrrq_id = 0;
8935
8936         ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
8937                                                 sizeof(struct ipr_cmnd), 512, 0);
8938
8939         if (!ioa_cfg->ipr_cmd_pool)
8940                 return -ENOMEM;
8941
8942         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8943         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8944
8945         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8946                 ipr_free_cmd_blks(ioa_cfg);
8947                 return -ENOMEM;
8948         }
8949
8950         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8951                 if (ioa_cfg->hrrq_num > 1) {
8952                         if (i == 0) {
8953                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8954                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
8955                                 ioa_cfg->hrrq[i].max_cmd_id =
8956                                         (entries_each_hrrq - 1);
8957                         } else {
8958                                 entries_each_hrrq =
8959                                         IPR_NUM_BASE_CMD_BLKS/
8960                                         (ioa_cfg->hrrq_num - 1);
8961                                 ioa_cfg->hrrq[i].min_cmd_id =
8962                                         IPR_NUM_INTERNAL_CMD_BLKS +
8963                                         (i - 1) * entries_each_hrrq;
8964                                 ioa_cfg->hrrq[i].max_cmd_id =
8965                                         (IPR_NUM_INTERNAL_CMD_BLKS +
8966                                         i * entries_each_hrrq - 1);
8967                         }
8968                 } else {
8969                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
8970                         ioa_cfg->hrrq[i].min_cmd_id = 0;
8971                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8972                 }
8973                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
8974         }
8975
8976         BUG_ON(ioa_cfg->hrrq_num == 0);
8977
8978         i = IPR_NUM_CMD_BLKS -
8979                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8980         if (i > 0) {
8981                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8982                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8983         }
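        /*
         * Worked example (illustrative): with hrrq_num == 3, HRRQ 0 holds
         * the IPR_NUM_INTERNAL_CMD_BLKS internal commands, HRRQs 1 and 2
         * each get IPR_NUM_BASE_CMD_BLKS / 2 command IDs, and any IDs left
         * over by the integer division are folded into the last HRRQ above.
         */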
8984
8985         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8986                 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8987
8988                 if (!ipr_cmd) {
8989                         ipr_free_cmd_blks(ioa_cfg);
8990                         return -ENOMEM;
8991                 }
8992
8993                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8994                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8995                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8996
8997                 ioarcb = &ipr_cmd->ioarcb;
8998                 ipr_cmd->dma_addr = dma_addr;
8999                 if (ioa_cfg->sis64)
9000                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9001                 else
9002                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9003
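                /*
                 * Store the command index shifted left two bits; the low two
                 * bits of each word returned in the HRRQ carry queue status
                 * flags (response-valid and toggle), not the handle itself.
                 */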
9004                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9005                 if (ioa_cfg->sis64) {
9006                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9007                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9008                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9009                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9010                 } else {
9011                         ioarcb->write_ioadl_addr =
9012                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9013                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9014                         ioarcb->ioasa_host_pci_addr =
9015                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9016                 }
9017                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9018                 ipr_cmd->cmd_index = i;
9019                 ipr_cmd->ioa_cfg = ioa_cfg;
9020                 ipr_cmd->sense_buffer_dma = dma_addr +
9021                         offsetof(struct ipr_cmnd, sense_buffer);
9022
9023                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9024                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9025                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9026                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9027                         hrrq_id++;
9028         }
9029
9030         return 0;
9031 }
9032
9033 /**
9034  * ipr_alloc_mem - Allocate memory for an adapter
9035  * @ioa_cfg:    ioa config struct
9036  *
9037  * Return value:
9038  *      0 on success / non-zero for error
9039  **/
9040 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9041 {
9042         struct pci_dev *pdev = ioa_cfg->pdev;
9043         int i, rc = -ENOMEM;
9044
9045         ENTER;
9046         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9047                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9048
9049         if (!ioa_cfg->res_entries)
9050                 goto out;
9051
9052         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9053                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9054                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9055         }
9056
9057         ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9058                                                 sizeof(struct ipr_misc_cbs),
9059                                                 &ioa_cfg->vpd_cbs_dma);
9060
9061         if (!ioa_cfg->vpd_cbs)
9062                 goto out_free_res_entries;
9063
9064         if (ipr_alloc_cmd_blks(ioa_cfg))
9065                 goto out_free_vpd_cbs;
9066
9067         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9068                 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9069                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9070                                         &ioa_cfg->hrrq[i].host_rrq_dma);
9071
9072                 if (!ioa_cfg->hrrq[i].host_rrq) {
9073                         while (--i >= 0)
9074                                 pci_free_consistent(pdev,
9075                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9076                                         ioa_cfg->hrrq[i].host_rrq,
9077                                         ioa_cfg->hrrq[i].host_rrq_dma);
9078                         goto out_ipr_free_cmd_blocks;
9079                 }
9080                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9081         }
9082
9083         ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9084                                                     ioa_cfg->cfg_table_size,
9085                                                     &ioa_cfg->cfg_table_dma);
9086
9087         if (!ioa_cfg->u.cfg_table)
9088                 goto out_free_host_rrq;
9089
9090         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9091                 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9092                                                            sizeof(struct ipr_hostrcb),
9093                                                            &ioa_cfg->hostrcb_dma[i]);
9094
9095                 if (!ioa_cfg->hostrcb[i])
9096                         goto out_free_hostrcb_dma;
9097
9098                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9099                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9100                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9101                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9102         }
9103
9104         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9105                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9106
9107         if (!ioa_cfg->trace)
9108                 goto out_free_hostrcb_dma;
9109
9110         rc = 0;
9111 out:
9112         LEAVE;
9113         return rc;
9114
9115 out_free_hostrcb_dma:
9116         while (i-- > 0) {
9117                 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9118                                     ioa_cfg->hostrcb[i],
9119                                     ioa_cfg->hostrcb_dma[i]);
9120         }
9121         pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9122                             ioa_cfg->u.cfg_table,
9123                             ioa_cfg->cfg_table_dma);
9124 out_free_host_rrq:
9125         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9126                 pci_free_consistent(pdev,
9127                                 sizeof(u32) * ioa_cfg->hrrq[i].size,
9128                                 ioa_cfg->hrrq[i].host_rrq,
9129                                 ioa_cfg->hrrq[i].host_rrq_dma);
9130         }
9131 out_ipr_free_cmd_blocks:
9132         ipr_free_cmd_blks(ioa_cfg);
9133 out_free_vpd_cbs:
9134         pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9135                             ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9136 out_free_res_entries:
9137         kfree(ioa_cfg->res_entries);
9138         goto out;
9139 }
9140
9141 /**
9142  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9143  * @ioa_cfg:    ioa config struct
9144  *
9145  * Return value:
9146  *      none
9147  **/
9148 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9149 {
9150         int i;
9151
9152         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9153                 ioa_cfg->bus_attr[i].bus = i;
9154                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9155                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9156                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9157                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9158                 else
9159                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9160         }
9161 }
9162
9163 /**
9164  * ipr_init_regs - Initialize IOA registers
9165  * @ioa_cfg:    ioa config struct
9166  *
9167  * Return value:
9168  *      none
9169  **/
9170 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9171 {
9172         const struct ipr_interrupt_offsets *p;
9173         struct ipr_interrupts *t;
9174         void __iomem *base;
9175
9176         p = &ioa_cfg->chip_cfg->regs;
9177         t = &ioa_cfg->regs;
9178         base = ioa_cfg->hdw_dma_regs;
9179
9180         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9181         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9182         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9183         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9184         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9185         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9186         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9187         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9188         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9189         t->ioarrin_reg = base + p->ioarrin_reg;
9190         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9191         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9192         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9193         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9194         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9195         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9196
9197         if (ioa_cfg->sis64) {
9198                 t->init_feedback_reg = base + p->init_feedback_reg;
9199                 t->dump_addr_reg = base + p->dump_addr_reg;
9200                 t->dump_data_reg = base + p->dump_data_reg;
9201                 t->endian_swap_reg = base + p->endian_swap_reg;
9202         }
9203 }
9204
9205 /**
9206  * ipr_init_ioa_cfg - Initialize IOA config struct
9207  * @ioa_cfg:    ioa config struct
9208  * @host:               scsi host struct
9209  * @pdev:               PCI dev struct
9210  *
9211  * Return value:
9212  *      none
9213  **/
9214 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9215                              struct Scsi_Host *host, struct pci_dev *pdev)
9216 {
9217         int i;
9218
9219         ioa_cfg->host = host;
9220         ioa_cfg->pdev = pdev;
9221         ioa_cfg->log_level = ipr_log_level;
9222         ioa_cfg->doorbell = IPR_DOORBELL;
9223         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9224         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9225         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9226         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9227         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9228         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9229
9230         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9231         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9232         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9233         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9234         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9235         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9236         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9237         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9238         ioa_cfg->sdt_state = INACTIVE;
9239
9240         ipr_initialize_bus_attr(ioa_cfg);
9241         ioa_cfg->max_devs_supported = ipr_max_devs;
9242
9243         if (ioa_cfg->sis64) {
9244                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9245                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9246                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9247                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9248                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9249                                            + ((sizeof(struct ipr_config_table_entry64)
9250                                                * ioa_cfg->max_devs_supported)));
9251         } else {
9252                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9253                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9254                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9255                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9256                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9257                                            + ((sizeof(struct ipr_config_table_entry)
9258                                                * ioa_cfg->max_devs_supported)));
9259         }
9260
9261         host->max_channel = IPR_MAX_BUS_TO_SCAN;
9262         host->unique_id = host->host_no;
9263         host->max_cmd_len = IPR_MAX_CDB_LEN;
9264         host->can_queue = ioa_cfg->max_cmds;
9265         pci_set_drvdata(pdev, ioa_cfg);
9266
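        /* HRRQ 0 shares the SCSI host lock; additional HRRQs use private locks. */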
9267         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9268                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9269                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9270                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9271                 if (i == 0)
9272                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9273                 else
9274                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9275         }
9276 }
9277
9278 /**
9279  * ipr_get_chip_info - Find adapter chip information
9280  * @dev_id:             PCI device id struct
9281  *
9282  * Return value:
9283  *      ptr to chip information on success / NULL on failure
9284  **/
9285 static const struct ipr_chip_t *
9286 ipr_get_chip_info(const struct pci_device_id *dev_id)
9287 {
9288         int i;
9289
9290         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9291                 if (ipr_chip[i].vendor == dev_id->vendor &&
9292                     ipr_chip[i].device == dev_id->device)
9293                         return &ipr_chip[i];
9294         return NULL;
9295 }
9296
9297 /**
9298  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9299  *                                              during probe time
9300  * @ioa_cfg:    ioa config struct
9301  *
9302  * Return value:
9303  *      None
9304  **/
9305 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9306 {
9307         struct pci_dev *pdev = ioa_cfg->pdev;
9308
9309         if (pci_channel_offline(pdev)) {
9310                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9311                                    !pci_channel_offline(pdev),
9312                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9313                 pci_restore_state(pdev);
9314         }
9315 }
9316
9317 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9318 {
9319         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9320         int i, err, vectors;
9321
9322         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9323                 entries[i].entry = i;
9324
9325         vectors = ipr_number_of_msix;
9326
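        /*
         * A positive return from pci_enable_msix() is the number of vectors
         * the system could actually provide; retry with that smaller count.
         */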
9327         while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
9328                 vectors = err;
9329
9330         if (err < 0) {
9331                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9332                 pci_disable_msix(ioa_cfg->pdev);
9333                 return err;
9334         }
9335
9336         if (!err) {
9337                 for (i = 0; i < vectors; i++)
9338                         ioa_cfg->vectors_info[i].vec = entries[i].vector;
9339                 ioa_cfg->nvectors = vectors;
9340         }
9341
9342         return err;
9343 }
9344
9345 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9346 {
9347         int i, err, vectors;
9348
9349         vectors = ipr_number_of_msix;
9350
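        /*
         * As with MSI-X above, a positive return from pci_enable_msi_block()
         * is the number of vectors that could be allocated; retry with it.
         */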
9351         while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
9352                 vectors = err;
9353
9354         if (err < 0) {
9355                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9356                 pci_disable_msi(ioa_cfg->pdev);
9357                 return err;
9358         }
9359
9360         if (!err) {
9361                 for (i = 0; i < vectors; i++)
9362                         ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9363                 ioa_cfg->nvectors = vectors;
9364         }
9365
9366         return err;
9367 }
9368
9369 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9370 {
9371         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9372
9373         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9374                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9375                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9376                 ioa_cfg->vectors_info[vec_idx].
9377                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9378         }
9379 }
9380
9381 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9382 {
9383         int i, rc;
9384
9385         for (i = 1; i < ioa_cfg->nvectors; i++) {
9386                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9387                         ipr_isr_mhrrq,
9388                         0,
9389                         ioa_cfg->vectors_info[i].desc,
9390                         &ioa_cfg->hrrq[i]);
9391                 if (rc) {
9392                         while (--i >= 0)
9393                                 free_irq(ioa_cfg->vectors_info[i].vec,
9394                                         &ioa_cfg->hrrq[i]);
9395                         return rc;
9396                 }
9397         }
9398         return 0;
9399 }
9400
9401 /**
9402  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9403  * @irq:                interrupt number
 * @devp:               ioa config struct pointer
9404  *
9405  * Description: Simply set the msi_received flag to 1 indicating that
9406  * Message Signaled Interrupts are supported.
9407  *
9408  * Return value:
9409  *      IRQ_HANDLED
9410  **/
9411 static irqreturn_t ipr_test_intr(int irq, void *devp)
9412 {
9413         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9414         unsigned long lock_flags = 0;
9415         irqreturn_t rc = IRQ_HANDLED;
9416
9417         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9418         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9419
9420         ioa_cfg->msi_received = 1;
9421         wake_up(&ioa_cfg->msi_wait_q);
9422
9423         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9424         return rc;
9425 }
9426
9427 /**
9428  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9429  * @pdev:               PCI device struct
9430  *
9431  * Description: The return value from pci_enable_msi() cannot always be
9432  * trusted.  This routine sets up and initiates a test interrupt to determine
9433  * if the interrupt is received via the ipr_test_intr() service routine.
9434  * If the test fails, the driver will fall back to LSI.
9435  *
9436  * Return value:
9437  *      0 on success / non-zero on failure
9438  **/
9439 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9440 {
9441         int rc;
9442         volatile u32 int_reg;
9443         unsigned long lock_flags = 0;
9444
9445         ENTER;
9446
9447         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9448         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9449         ioa_cfg->msi_received = 0;
9450         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9451         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9452         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9453         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9454
9455         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9456                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9457         else
9458                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9459         if (rc) {
9460                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9461                 return rc;
9462         } else if (ipr_debug)
9463                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9464
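        /* Trigger the test interrupt and give it up to one second (HZ) to arrive. */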
9465         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9466         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9467         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9468         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9469         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9470
9471         if (!ioa_cfg->msi_received) {
9472                 /* MSI test failed */
9473                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9474                 rc = -EOPNOTSUPP;
9475         } else if (ipr_debug)
9476                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9477
9478         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9479
9480         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9481                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9482         else
9483                 free_irq(pdev->irq, ioa_cfg);
9484
9485         LEAVE;
9486
9487         return rc;
9488 }
9489
/**
9490  * ipr_probe_ioa - Allocates memory and does first stage of initialization
9491  * @pdev:               PCI device struct
9492  * @dev_id:             PCI device id struct
9493  *
9494  * Return value:
9495  *      0 on success / non-zero on failure
9496  **/
9497 static int ipr_probe_ioa(struct pci_dev *pdev,
9498                          const struct pci_device_id *dev_id)
9499 {
9500         struct ipr_ioa_cfg *ioa_cfg;
9501         struct Scsi_Host *host;
9502         unsigned long ipr_regs_pci;
9503         void __iomem *ipr_regs;
9504         int rc = PCIBIOS_SUCCESSFUL;
9505         volatile u32 mask, uproc, interrupts;
9506         unsigned long lock_flags, driver_lock_flags;
9507
9508         ENTER;
9509
9510         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9511         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9512
9513         if (!host) {
9514                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9515                 rc = -ENOMEM;
9516                 goto out;
9517         }
9518
9519         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9520         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9521         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9522
9523         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9524
9525         if (!ioa_cfg->ipr_chip) {
9526                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9527                         dev_id->vendor, dev_id->device);
9528                 goto out_scsi_host_put;
9529         }
9530
9531         /* set SIS 32 or SIS 64 */
9532         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9533         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9534         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9535         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9536
9537         if (ipr_transop_timeout)
9538                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9539         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9540                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9541         else
9542                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9543
9544         ioa_cfg->revid = pdev->revision;
9545
9546         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9547
9548         ipr_regs_pci = pci_resource_start(pdev, 0);
9549
9550         rc = pci_request_regions(pdev, IPR_NAME);
9551         if (rc < 0) {
9552                 dev_err(&pdev->dev,
9553                         "Couldn't register memory range of registers\n");
9554                 goto out_scsi_host_put;
9555         }
9556
9557         rc = pci_enable_device(pdev);
9558
9559         if (rc || pci_channel_offline(pdev)) {
9560                 if (pci_channel_offline(pdev)) {
9561                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9562                         rc = pci_enable_device(pdev);
9563                 }
9564
9565                 if (rc) {
9566                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9567                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9568                         goto out_release_regions;
9569                 }
9570         }
9571
9572         ipr_regs = pci_ioremap_bar(pdev, 0);
9573
9574         if (!ipr_regs) {
9575                 dev_err(&pdev->dev,
9576                         "Couldn't map memory range of registers\n");
9577                 rc = -ENOMEM;
9578                 goto out_disable;
9579         }
9580
9581         ioa_cfg->hdw_dma_regs = ipr_regs;
9582         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9583         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9584
9585         ipr_init_regs(ioa_cfg);
9586
9587         if (ioa_cfg->sis64) {
9588                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9589                 if (rc < 0) {
9590                         dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9591                         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9592                 }
9593         } else
9594                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9595
9596         if (rc < 0) {
9597                 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9598                 goto cleanup_nomem;
9599         }
9600
9601         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9602                                    ioa_cfg->chip_cfg->cache_line_size);
9603
9604         if (rc != PCIBIOS_SUCCESSFUL) {
9605                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9606                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9607                 rc = -EIO;
9608                 goto cleanup_nomem;
9609         }
9610
9611         /* Issue MMIO read to ensure card is not in EEH */
9612         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9613         ipr_wait_for_pci_err_recovery(ioa_cfg);
9614
9615         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9616                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9617                         IPR_MAX_MSIX_VECTORS);
9618                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9619         }
9620
9621         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9622                         ipr_enable_msix(ioa_cfg) == 0)
9623                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9624         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9625                         ipr_enable_msi(ioa_cfg) == 0)
9626                 ioa_cfg->intr_flag = IPR_USE_MSI;
9627         else {
9628                 ioa_cfg->intr_flag = IPR_USE_LSI;
9629                 ioa_cfg->nvectors = 1;
9630                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9631         }
9632
9633         pci_set_master(pdev);
9634
9635         if (pci_channel_offline(pdev)) {
9636                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9637                 pci_set_master(pdev);
9638                 if (pci_channel_offline(pdev)) {
9639                         rc = -EIO;
9640                         goto out_msi_disable;
9641                 }
9642         }
9643
9644         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9645             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9646                 rc = ipr_test_msi(ioa_cfg, pdev);
9647                 if (rc == -EOPNOTSUPP) {
9648                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9649                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9650                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9651                                 pci_disable_msi(pdev);
9652                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9653                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9654                                 pci_disable_msix(pdev);
9655                         }
9656
9657                         ioa_cfg->intr_flag = IPR_USE_LSI;
9658                         ioa_cfg->nvectors = 1;
9659                 } else if (rc)
9660                         goto out_msi_disable;
9661                 else {
9663                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9664                                 dev_info(&pdev->dev,
9665                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9666                                         ioa_cfg->nvectors, pdev->irq);
9667                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9668                                 dev_info(&pdev->dev,
9669                                         "Request for %d MSI-X vectors succeeded.\n",
9670                                         ioa_cfg->nvectors);
9671                 }
9672         }
9673
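        /* Use one HRRQ per vector, capped by online CPUs and the chip maximum. */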
9674         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9675                                 (unsigned int)num_online_cpus(),
9676                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9677
9678         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9679                 goto out_msi_disable;
9680
9681         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9682                 goto out_msi_disable;
9683
9684         rc = ipr_alloc_mem(ioa_cfg);
9685         if (rc < 0) {
9686                 dev_err(&pdev->dev,
9687                         "Couldn't allocate enough memory for device driver!\n");
9688                 goto out_msi_disable;
9689         }
9690
9691         /* Save away PCI config space for use following IOA reset */
9692         rc = pci_save_state(pdev);
9693
9694         if (rc != PCIBIOS_SUCCESSFUL) {
9695                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9696                 rc = -EIO;
9697                 goto cleanup_nolog;
9698         }
9699
9700         /*
9701          * If HRRQ updated interrupt is not masked, or reset alert is set,
9702          * the card is in an unknown state and needs a hard reset
9703          */
9704         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9705         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9706         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9707         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9708                 ioa_cfg->needs_hard_reset = 1;
9709         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9710                 ioa_cfg->needs_hard_reset = 1;
9711         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9712                 ioa_cfg->ioa_unit_checked = 1;
9713
9714         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9715         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9716         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9717
9718         if (ioa_cfg->intr_flag == IPR_USE_MSI
9719                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9720                 name_msi_vectors(ioa_cfg);
9721                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9722                         0,
9723                         ioa_cfg->vectors_info[0].desc,
9724                         &ioa_cfg->hrrq[0]);
9725                 if (!rc)
9726                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9727         } else {
9728                 rc = request_irq(pdev->irq, ipr_isr,
9729                          IRQF_SHARED,
9730                          IPR_NAME, &ioa_cfg->hrrq[0]);
9731         }
9732         if (rc) {
9733                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9734                         pdev->irq, rc);
9735                 goto cleanup_nolog;
9736         }
9737
9738         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9739             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9740                 ioa_cfg->needs_warm_reset = 1;
9741                 ioa_cfg->reset = ipr_reset_slot_reset;
9742         } else
9743                 ioa_cfg->reset = ipr_reset_start_bist;
9744
9745         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9746         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9747         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9748
9749         LEAVE;
9750 out:
9751         return rc;
9752
9753 cleanup_nolog:
9754         ipr_free_mem(ioa_cfg);
9755 out_msi_disable:
9756         ipr_wait_for_pci_err_recovery(ioa_cfg);
9757         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9758                 pci_disable_msi(pdev);
9759         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9760                 pci_disable_msix(pdev);
9761 cleanup_nomem:
9762         iounmap(ipr_regs);
9763 out_disable:
9764         pci_disable_device(pdev);
9765 out_release_regions:
9766         pci_release_regions(pdev);
9767 out_scsi_host_put:
9768         scsi_host_put(host);
9769         goto out;
9770 }
9771
9772 /**
9773  * ipr_scan_vsets - Scans for VSET devices
9774  * @ioa_cfg:    ioa config struct
9775  *
9776  * Description: Since the VSET resources do not follow SAM in that we can have
9777  * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9778  *
9779  * Return value:
9780  *      none
9781  **/
9782 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9783 {
9784         int target, lun;
9785
9786         for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9787                 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9788                         scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9789 }
9790
9791 /**
9792  * ipr_initiate_ioa_bringdown - Bring down an adapter
9793  * @ioa_cfg:            ioa config struct
9794  * @shutdown_type:      shutdown type
9795  *
9796  * Description: This function will initiate bringing down the adapter.
9797  * This consists of issuing an IOA shutdown to the adapter
9798  * to flush the cache, and running BIST.
9799  * If the caller needs to wait on the completion of the reset,
9800  * the caller must sleep on the reset_wait_q.
9801  *
9802  * Return value:
9803  *      none
9804  **/
9805 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9806                                        enum ipr_shutdown_type shutdown_type)
9807 {
9808         ENTER;
9809         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9810                 ioa_cfg->sdt_state = ABORT_DUMP;
9811         ioa_cfg->reset_retries = 0;
9812         ioa_cfg->in_ioa_bringdown = 1;
9813         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9814         LEAVE;
9815 }
9816
9817 /**
9818  * __ipr_remove - Remove a single adapter
9819  * @pdev:       pci device struct
9820  *
9821  * Adapter hot plug remove entry point.
9822  *
9823  * Return value:
9824  *      none
9825  **/
9826 static void __ipr_remove(struct pci_dev *pdev)
9827 {
9828         unsigned long host_lock_flags = 0;
9829         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9830         int i;
9831         unsigned long driver_lock_flags;
9832         ENTER;
9833
9834         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9835         while (ioa_cfg->in_reset_reload) {
9836                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9837                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9838                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9839         }
9840
9841         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9842                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9843                 ioa_cfg->hrrq[i].removing_ioa = 1;
9844                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9845         }
9846         wmb();
9847         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9848
9849         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9850         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9851         flush_work(&ioa_cfg->work_q);
9852         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9853         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9854
9855         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9856         list_del(&ioa_cfg->queue);
9857         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9858
9859         if (ioa_cfg->sdt_state == ABORT_DUMP)
9860                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9861         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9862
9863         ipr_free_all_resources(ioa_cfg);
9864
9865         LEAVE;
9866 }
9867
9868 /**
9869  * ipr_remove - IOA hot plug remove entry point
9870  * @pdev:       pci device struct
9871  *
9872  * Removes the trace/dump sysfs files and the Scsi_Host, then performs the common teardown in __ipr_remove().
9873  *
9874  * Return value:
9875  *      none
9876  **/
9877 static void ipr_remove(struct pci_dev *pdev)
9878 {
9879         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9880
9881         ENTER;
9882
9883         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9884                               &ipr_trace_attr);
9885         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9886                              &ipr_dump_attr);
9887         scsi_remove_host(ioa_cfg->host);
9888
9889         __ipr_remove(pdev);
9890
9891         LEAVE;
9892 }
9893
9894 /**
9895  * ipr_probe - Adapter hot plug add entry point
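 * @pdev:       pci device struct
 * @dev_id:     pci device id struct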
9896  *
9897  * Return value:
9898  *      0 on success / non-zero on failure
9899  **/
9900 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9901 {
9902         struct ipr_ioa_cfg *ioa_cfg;
9903         int rc, i;
9904
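        /*
         * Probing is done in two stages: ipr_probe_ioa() sets up the
         * adapter's resources, ipr_probe_ioa_part2() completes bringing
         * the adapter to an operational state.
         */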
9905         rc = ipr_probe_ioa(pdev, dev_id);
9906
9907         if (rc)
9908                 return rc;
9909
9910         ioa_cfg = pci_get_drvdata(pdev);
9911         rc = ipr_probe_ioa_part2(ioa_cfg);
9912
9913         if (rc) {
9914                 __ipr_remove(pdev);
9915                 return rc;
9916         }
9917
9918         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9919
9920         if (rc) {
9921                 __ipr_remove(pdev);
9922                 return rc;
9923         }
9924
9925         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9926                                    &ipr_trace_attr);
9927
9928         if (rc) {
9929                 scsi_remove_host(ioa_cfg->host);
9930                 __ipr_remove(pdev);
9931                 return rc;
9932         }
9933
9934         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9935                                    &ipr_dump_attr);
9936
9937         if (rc) {
9938                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9939                                       &ipr_trace_attr);
9940                 scsi_remove_host(ioa_cfg->host);
9941                 __ipr_remove(pdev);
9942                 return rc;
9943         }
9944
9945         scsi_scan_host(ioa_cfg->host);
9946         ipr_scan_vsets(ioa_cfg);
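        /* Make the IOA itself visible as a SCSI device (e.g. for management utilities) */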
9947         scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9948         ioa_cfg->allow_ml_add_del = 1;
9949         ioa_cfg->host->max_channel = IPR_VSET_BUS;
9950         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9951
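        /*
         * Enable iopoll on the secondary HRR queues when multiple MSI-X
         * vectors are in use; hrrq[0] stays with regular interrupt handling.
         */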
9952         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9953                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9954                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9955                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
9956                                         ioa_cfg->iopoll_weight, ipr_iopoll);
9957                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
9958                 }
9959         }
9960
9961         schedule_work(&ioa_cfg->work_q);
9962         return 0;
9963 }
9964
9965 /**
9966  * ipr_shutdown - Shutdown handler.
9967  * @pdev:       pci device struct
9968  *
9969  * This function is invoked upon system shutdown/reboot. It issues
9970  * a normal shutdown to the adapter to flush the write cache.
9971  *
9972  * Return value:
9973  *      none
9974  **/
9975 static void ipr_shutdown(struct pci_dev *pdev)
9976 {
9977         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9978         unsigned long lock_flags = 0;
9979         int i;
9980
9981         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
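        /* Quiesce iopoll on the secondary HRR queues before bringing the IOA down */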
9982         if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
9983                         ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9984                 ioa_cfg->iopoll_weight = 0;
9985                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
9986                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
9987         }
9988
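        /* Let any reset/reload in progress finish before initiating bringdown */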
9989         while (ioa_cfg->in_reset_reload) {
9990                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9991                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9992                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9993         }
9994
9995         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9996         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9997         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9998 }
9999
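/*
 * PCI device ID table. The driver_data field flags adapters that need
 * a longer transport-operational timeout and/or a PCI warm reset during
 * error recovery.
 */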
10000 static const struct pci_device_id ipr_pci_table[] = {
10001         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10002                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10003         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10004                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10005         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10006                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10007         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10008                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10009         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10010                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10011         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10012                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10013         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10014                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10015         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10016                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10017                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10018         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10019                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10020         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10021                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10022                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10023         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10024                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10025                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10026         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10027                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10028         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10029                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10030                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10031         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10032                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10033                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10034         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10035                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10036                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10037         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10038                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10039         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10040                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10041         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10042                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10043                 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10044         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10045                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10046         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10047                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10048         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10049                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10050                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10051         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10052                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10053                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10054         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10055                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10056         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10057                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10058         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10059                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10060         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10061                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10062         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10063                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10064         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10065                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10066         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10067                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10068         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10069                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10070         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10071                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10072         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10073                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10074         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10075                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10076         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10077                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10078         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10079                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10080         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10081                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10082         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10083                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10084         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10085                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10086         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10087                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10088         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10089                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10090         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10091                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10092         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10093                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10094         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10095                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10096         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10097                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10098         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10099                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10100         { }
10101 };
10102 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10103
10104 static const struct pci_error_handlers ipr_err_handler = {
10105         .error_detected = ipr_pci_error_detected,
10106         .mmio_enabled = ipr_pci_mmio_enabled,
10107         .slot_reset = ipr_pci_slot_reset,
10108 };
10109
10110 static struct pci_driver ipr_driver = {
10111         .name = IPR_NAME,
10112         .id_table = ipr_pci_table,
10113         .probe = ipr_probe,
10114         .remove = ipr_remove,
10115         .shutdown = ipr_shutdown,
10116         .err_handler = &ipr_err_handler,
10117 };
10118
10119 /**
10120  * ipr_halt_done - Shutdown prepare completion
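 * @ipr_cmd:    ipr command struct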
10121  *
10122  * Return value:
10123  *      none
10124  **/
10125 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10126 {
10127         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10128 }
10129
10130 /**
10131  * ipr_halt - Issue shutdown prepare to all adapters
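 * @nb:         notifier block
 * @event:      reboot notifier event (SYS_RESTART, SYS_HALT, SYS_POWER_OFF)
 * @buf:        unused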
10132  *
10133  * Return value:
10134  *      NOTIFY_OK for restart/halt/power-off events / NOTIFY_DONE otherwise
10135  **/
10136 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10137 {
10138         struct ipr_cmnd *ipr_cmd;
10139         struct ipr_ioa_cfg *ioa_cfg;
10140         unsigned long flags = 0, driver_lock_flags;
10141
10142         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10143                 return NOTIFY_DONE;
10144
10145         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10146
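        /*
         * Issue a "prepare for shutdown" to every adapter that is still
         * accepting commands; ipr_halt_done() just returns the command
         * block to the free queue on completion.
         */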
10147         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10148                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10149                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10150                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10151                         continue;
10152                 }
10153
10154                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10155                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10156                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10157                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10158                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10159
10160                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10161                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10162         }
10163         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10164
10165         return NOTIFY_OK;
10166 }
10167
10168 static struct notifier_block ipr_notifier = {
10169         .notifier_call = ipr_halt,
10170 };
10171
10172 /**
10173  * ipr_init - Module entry point
10174  *
10175  * Return value:
10176  *      0 on success / negative value on failure
10177  **/
static int __init ipr_init(void)
{
        int rc;

        ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
                 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

        register_reboot_notifier(&ipr_notifier);
        rc = pci_register_driver(&ipr_driver);
        if (rc)
                /* Don't leave the notifier pointing into an unloaded module */
                unregister_reboot_notifier(&ipr_notifier);
        return rc;
}
10186
10187 /**
10188  * ipr_exit - Module unload
10189  *
10190  * Module unload entry point.
10191  *
10192  * Return value:
10193  *      none
10194  **/
10195 static void __exit ipr_exit(void)
10196 {
10197         unregister_reboot_notifier(&ipr_notifier);
10198         pci_unregister_driver(&ipr_driver);
10199 }
10200
10201 module_init(ipr_init);
10202 module_exit(ipr_exit);