/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

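/* Bus speed limits, indexed by the max_speed module parameter (0-2) */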
static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
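/*
 * Example module load (hypothetical values), enabling reduced
 * retries and maximum log verbosity:
 *
 *      modprobe ipr fastfail=1 log_level=4
 */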

/*  A constant array of IOASCs/URCs/Error Messages */
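/* Each entry: IOASC value, a flag controlling IOASA logging, the log
   level at which the message is reported, and the message text (see
   struct ipr_error_table_t in ipr.h) */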
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exists"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

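/*
 * SES inquiry data used to limit backplane bus speed. An 'X' in the
 * mask string compares that product id byte; any other character
 * (e.g. '*') is ignored.
 */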
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:       trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        unsigned int trace_index;

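        /*
         * The trace buffer is a ring: the atomic index wraps via
         * IPR_TRACE_INDEX_MASK, so the oldest entries are overwritten.
         */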
        trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
        trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function to call when the command completes
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:   interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
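        /* Read back to ensure the posted MMIO writes have reached the adapter */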
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:    ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:        ipr command struct
 * @done:           done function
 * @timeout_func:   timeout function
 * @timeout:        timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:        ipr command struct
 * @timeout_func:   function to invoke if command times out
 * @timeout:        timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

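        /* The caller holds host_lock; drop it while sleeping on the completion */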
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

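/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:    ioa config struct
 *
 * HRR queue 0 is reserved for internally generated commands, so when
 * multiple queues are configured, commands are distributed round-robin
 * across queues 1 through hrrq_num - 1.
 *
 * Return value:
 *      hrrq index
 **/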
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        unsigned int hrrq;

        if (ioa_cfg->hrrq_num == 1)
                hrrq = 0;
        else {
                hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
                hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
        }
        return hrrq;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:       HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

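        /*
         * SIS64 config table entries do not carry a SCSI bus/target/lun,
         * so stable virtual addresses are fabricated here: the IOA focal
         * point, arrays, and volume sets each get a dedicated virtual bus,
         * and target ids are allocated from per-type bitmaps.
         */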
        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
                res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:    ioa config struct
 * @res_path:   resource path
1284  * @buffer:     buffer
1285  * @len:        length of buffer provided
1286  *
1287  * Return value:
1288  *      pointer to buffer
1289  **/
1290 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1291                                  u8 *res_path, char *buffer, int len)
1292 {
1293         char *p = buffer;
1294
1295         *p = '\0';
1296         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1297         __ipr_format_res_path(res_path, p, len - (p - buffer));
1298         return buffer;
1299 }
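/*
 * Illustrative example (editorial, not part of the driver): assuming a
 * host number of 2 and a res_path of { 0x00, 0x0E, 0x01, 0xff, ... },
 * the two helpers above produce:
 *
 *	char buffer[IPR_MAX_RES_PATH_LENGTH];
 *
 *	__ipr_format_res_path(res_path, buffer, sizeof(buffer));
 *	//  buffer == "00-0E-01"
 *	ipr_format_res_path(ioa_cfg, res_path, buffer, sizeof(buffer));
 *	//  buffer == "2/00-0E-01"
 *
 * A 0xff byte terminates the resource path array.
 */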
1300
1301 /**
1302  * ipr_update_res_entry - Update the resource entry.
1303  * @res:        resource entry struct
1304  * @cfgtew:     config table entry wrapper struct
1305  *
1306  * Return value:
1307  *      none
1308  **/
1309 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1310                                  struct ipr_config_table_entry_wrapper *cfgtew)
1311 {
1312         char buffer[IPR_MAX_RES_PATH_LENGTH];
1313         unsigned int proto;
1314         int new_path = 0;
1315
1316         if (res->ioa_cfg->sis64) {
1317                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1318                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1319                 res->type = cfgtew->u.cfgte64->res_type;
1320
1321                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1322                         sizeof(struct ipr_std_inq_data));
1323
1324                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1325                 proto = cfgtew->u.cfgte64->proto;
1326                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1327                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1328
1329                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1330                         sizeof(res->dev_lun.scsi_lun));
1331
1332                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1333                                         sizeof(res->res_path))) {
1334                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1335                                 sizeof(res->res_path));
1336                         new_path = 1;
1337                 }
1338
1339                 if (res->sdev && new_path)
1340                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1341                                     ipr_format_res_path(res->ioa_cfg,
1342                                         res->res_path, buffer, sizeof(buffer)));
1343         } else {
1344                 res->flags = cfgtew->u.cfgte->flags;
1345                 if (res->flags & IPR_IS_IOA_RESOURCE)
1346                         res->type = IPR_RES_TYPE_IOAFP;
1347                 else
1348                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1349
1350                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1351                         sizeof(struct ipr_std_inq_data));
1352
1353                 res->qmodel = IPR_QUEUEING_MODEL(res);
1354                 proto = cfgtew->u.cfgte->proto;
1355                 res->res_handle = cfgtew->u.cfgte->res_handle;
1356         }
1357
1358         ipr_update_ata_class(res, proto);
1359 }
1360
1361 /**
1362  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1363  *                        for the resource.
1364  * @res:        resource entry struct
1366  *
1367  * Return value:
1368  *      none
1369  **/
1370 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1371 {
1372         struct ipr_resource_entry *gscsi_res = NULL;
1373         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1374
1375         if (!ioa_cfg->sis64)
1376                 return;
1377
1378         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1379                 clear_bit(res->target, ioa_cfg->array_ids);
1380         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1381                 clear_bit(res->target, ioa_cfg->vset_ids);
1382         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1383                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1384                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1385                                 return;
1386                 clear_bit(res->target, ioa_cfg->target_ids);
1387
1388         } else if (res->bus == 0)
1389                 clear_bit(res->target, ioa_cfg->target_ids);
1390 }
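/*
 * Illustrative sketch (editorial): target ids are managed as a simple
 * bitmap allocator. ipr_init_res_entry() claims the first free bit and
 * ipr_clear_res_target() releases it; the names below are hypothetical:
 *
 *	DECLARE_BITMAP(ids, MAX_IDS);
 *
 *	id = find_first_zero_bit(ids, MAX_IDS);	// allocate an id
 *	set_bit(id, ids);
 *	...
 *	clear_bit(id, ids);			// free it again
 *
 * The generic SCSI case is the exception: several resources may share
 * one dev_id, so the bit is only cleared once no other entry on
 * used_res_q still references that dev_id.
 */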
1391
1392 /**
1393  * ipr_handle_config_change - Handle a config change from the adapter
1394  * @ioa_cfg:    ioa config struct
1395  * @hostrcb:    hostrcb
1396  *
1397  * Return value:
1398  *      none
1399  **/
1400 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1401                                      struct ipr_hostrcb *hostrcb)
1402 {
1403         struct ipr_resource_entry *res = NULL;
1404         struct ipr_config_table_entry_wrapper cfgtew;
1405         __be32 cc_res_handle;
1406
1407         u32 is_ndn = 1;
1408
1409         if (ioa_cfg->sis64) {
1410                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1411                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1412         } else {
1413                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1414                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1415         }
1416
1417         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1418                 if (res->res_handle == cc_res_handle) {
1419                         is_ndn = 0;
1420                         break;
1421                 }
1422         }
1423
1424         if (is_ndn) {
1425                 if (list_empty(&ioa_cfg->free_res_q)) {
1426                         ipr_send_hcam(ioa_cfg,
1427                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1428                                       hostrcb);
1429                         return;
1430                 }
1431
1432                 res = list_entry(ioa_cfg->free_res_q.next,
1433                                  struct ipr_resource_entry, queue);
1434
1435                 list_del(&res->queue);
1436                 ipr_init_res_entry(res, &cfgtew);
1437                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1438         }
1439
1440         ipr_update_res_entry(res, &cfgtew);
1441
1442         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1443                 if (res->sdev) {
1444                         res->del_from_ml = 1;
1445                         res->res_handle = IPR_INVALID_RES_HANDLE;
1446                         schedule_work(&ioa_cfg->work_q);
1447                 } else {
1448                         ipr_clear_res_target(res);
1449                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1450                 }
1451         } else if (!res->sdev || res->del_from_ml) {
1452                 res->add_to_ml = 1;
1453                 schedule_work(&ioa_cfg->work_q);
1454         }
1455
1456         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1457 }
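/*
 * Editorial summary of the flow above: a handle that matches nothing on
 * used_res_q is treated as a new device, so a free resource entry is
 * claimed and initialized (or the HCAM is simply re-queued when none is
 * left). Remove notifications either defer teardown to the worker thread
 * while a scsi_device is still attached, or release the entry at once.
 * In every case the HCAM buffer is handed back to the adapter at the end.
 */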
1458
1459 /**
1460  * ipr_process_ccn - Op done function for a CCN.
1461  * @ipr_cmd:    ipr command struct
1462  *
1463  * This function is the op done function for a configuration change
1464  * notification HCAM (host controlled asynchronous message) from the adapter.
1465  *
1466  * Return value:
1467  *      none
1468  **/
1469 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1470 {
1471         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1472         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1473         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1474
1475         list_del(&hostrcb->queue);
1476         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1477
1478         if (ioasc) {
1479                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1480                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1481                         dev_err(&ioa_cfg->pdev->dev,
1482                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1483
1484                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1485         } else {
1486                 ipr_handle_config_change(ioa_cfg, hostrcb);
1487         }
1488 }
1489
1490 /**
1491  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1492  * @i:          index into buffer
1493  * @buf:        string to modify
1494  *
1495  * This function will strip all trailing spaces, pad the end
1496  * of the string with a single space, and NUL terminate the string.
1497  *
1498  * Return value:
1499  *      new length of string
1500  **/
1501 static int strip_and_pad_whitespace(int i, char *buf)
1502 {
1503         while (i && buf[i] == ' ')
1504                 i--;
1505         buf[i+1] = ' ';
1506         buf[i+2] = '\0';
1507         return i + 2;
1508 }
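/*
 * Worked example (editorial): with buf = "IBM     " and i indexing the
 * last byte of the field, the loop backs up over the trailing spaces,
 * then one space and a terminator are written:
 *
 *	"IBM     "  ->  "IBM \0"
 *
 * The returned value is the index of the '\0', so the next field can be
 * copied to &buf[ret], overwriting the terminator.
 */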
1509
1510 /**
1511  * ipr_log_vpd_compact - Log the passed VPD compactly.
1512  * @prefix:             string to print at start of printk
1513  * @hostrcb:    hostrcb pointer
1514  * @vpd:                vendor/product id/sn struct
1515  *
1516  * Return value:
1517  *      none
1518  **/
1519 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1520                                 struct ipr_vpd *vpd)
1521 {
1522         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1523         int i = 0;
1524
1525         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1526         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1527
1528         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1529         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1530
1531         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1532         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1533
1534         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1535 }
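/*
 * Editorial note on the buffer sizing above: the "+ 3" covers the two
 * single-space pads written by strip_and_pad_whitespace() plus the final
 * '\0', so the worst case "VENDOR PRODUCT SERIAL\0" always fits.
 */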
1536
1537 /**
1538  * ipr_log_vpd - Log the passed VPD to the error log.
1539  * @vpd:                vendor/product id/sn struct
1540  *
1541  * Return value:
1542  *      none
1543  **/
1544 static void ipr_log_vpd(struct ipr_vpd *vpd)
1545 {
1546         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1547                     + IPR_SERIAL_NUM_LEN];
1548
1549         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1550         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1551                IPR_PROD_ID_LEN);
1552         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1553         ipr_err("Vendor/Product ID: %s\n", buffer);
1554
1555         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1556         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1557         ipr_err("    Serial Number: %s\n", buffer);
1558 }
1559
1560 /**
1561  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1562  * @prefix:             string to print at start of printk
1563  * @hostrcb:    hostrcb pointer
1564  * @vpd:                vendor/product id/sn/wwn struct
1565  *
1566  * Return value:
1567  *      none
1568  **/
1569 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1570                                     struct ipr_ext_vpd *vpd)
1571 {
1572         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1573         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1574                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1575 }
1576
1577 /**
1578  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1579  * @vpd:                vendor/product id/sn/wwn struct
1580  *
1581  * Return value:
1582  *      none
1583  **/
1584 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1585 {
1586         ipr_log_vpd(&vpd->vpd);
1587         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1588                 be32_to_cpu(vpd->wwid[1]));
1589 }
1590
1591 /**
1592  * ipr_log_enhanced_cache_error - Log a cache error.
1593  * @ioa_cfg:    ioa config struct
1594  * @hostrcb:    hostrcb struct
1595  *
1596  * Return value:
1597  *      none
1598  **/
1599 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1600                                          struct ipr_hostrcb *hostrcb)
1601 {
1602         struct ipr_hostrcb_type_12_error *error;
1603
1604         if (ioa_cfg->sis64)
1605                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1606         else
1607                 error = &hostrcb->hcam.u.error.u.type_12_error;
1608
1609         ipr_err("-----Current Configuration-----\n");
1610         ipr_err("Cache Directory Card Information:\n");
1611         ipr_log_ext_vpd(&error->ioa_vpd);
1612         ipr_err("Adapter Card Information:\n");
1613         ipr_log_ext_vpd(&error->cfc_vpd);
1614
1615         ipr_err("-----Expected Configuration-----\n");
1616         ipr_err("Cache Directory Card Information:\n");
1617         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1618         ipr_err("Adapter Card Information:\n");
1619         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1620
1621         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1622                      be32_to_cpu(error->ioa_data[0]),
1623                      be32_to_cpu(error->ioa_data[1]),
1624                      be32_to_cpu(error->ioa_data[2]));
1625 }
1626
1627 /**
1628  * ipr_log_cache_error - Log a cache error.
1629  * @ioa_cfg:    ioa config struct
1630  * @hostrcb:    hostrcb struct
1631  *
1632  * Return value:
1633  *      none
1634  **/
1635 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1636                                 struct ipr_hostrcb *hostrcb)
1637 {
1638         struct ipr_hostrcb_type_02_error *error =
1639                 &hostrcb->hcam.u.error.u.type_02_error;
1640
1641         ipr_err("-----Current Configuration-----\n");
1642         ipr_err("Cache Directory Card Information:\n");
1643         ipr_log_vpd(&error->ioa_vpd);
1644         ipr_err("Adapter Card Information:\n");
1645         ipr_log_vpd(&error->cfc_vpd);
1646
1647         ipr_err("-----Expected Configuration-----\n");
1648         ipr_err("Cache Directory Card Information:\n");
1649         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1650         ipr_err("Adapter Card Information:\n");
1651         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1652
1653         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1654                      be32_to_cpu(error->ioa_data[0]),
1655                      be32_to_cpu(error->ioa_data[1]),
1656                      be32_to_cpu(error->ioa_data[2]));
1657 }
1658
1659 /**
1660  * ipr_log_enhanced_config_error - Log a configuration error.
1661  * @ioa_cfg:    ioa config struct
1662  * @hostrcb:    hostrcb struct
1663  *
1664  * Return value:
1665  *      none
1666  **/
1667 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1668                                           struct ipr_hostrcb *hostrcb)
1669 {
1670         int errors_logged, i;
1671         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1672         struct ipr_hostrcb_type_13_error *error;
1673
1674         error = &hostrcb->hcam.u.error.u.type_13_error;
1675         errors_logged = be32_to_cpu(error->errors_logged);
1676
1677         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1678                 be32_to_cpu(error->errors_detected), errors_logged);
1679
1680         dev_entry = error->dev;
1681
1682         for (i = 0; i < errors_logged; i++, dev_entry++) {
1683                 ipr_err_separator;
1684
1685                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1686                 ipr_log_ext_vpd(&dev_entry->vpd);
1687
1688                 ipr_err("-----New Device Information-----\n");
1689                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1690
1691                 ipr_err("Cache Directory Card Information:\n");
1692                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1693
1694                 ipr_err("Adapter Card Information:\n");
1695                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1696         }
1697 }
1698
1699 /**
1700  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1701  * @ioa_cfg:    ioa config struct
1702  * @hostrcb:    hostrcb struct
1703  *
1704  * Return value:
1705  *      none
1706  **/
1707 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1708                                        struct ipr_hostrcb *hostrcb)
1709 {
1710         int errors_logged, i;
1711         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1712         struct ipr_hostrcb_type_23_error *error;
1713         char buffer[IPR_MAX_RES_PATH_LENGTH];
1714
1715         error = &hostrcb->hcam.u.error64.u.type_23_error;
1716         errors_logged = be32_to_cpu(error->errors_logged);
1717
1718         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1719                 be32_to_cpu(error->errors_detected), errors_logged);
1720
1721         dev_entry = error->dev;
1722
1723         for (i = 0; i < errors_logged; i++, dev_entry++) {
1724                 ipr_err_separator;
1725
1726                 ipr_err("Device %d : %s", i + 1,
1727                         __ipr_format_res_path(dev_entry->res_path,
1728                                               buffer, sizeof(buffer)));
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_config_error - Log a configuration error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                  struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb_device_data_entry *dev_entry;
1755         struct ipr_hostrcb_type_03_error *error;
1756
1757         error = &hostrcb->hcam.u.error.u.type_03_error;
1758         errors_logged = be32_to_cpu(error->errors_logged);
1759
1760         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1761                 be32_to_cpu(error->errors_detected), errors_logged);
1762
1763         dev_entry = error->dev;
1764
1765         for (i = 0; i < errors_logged; i++, dev_entry++) {
1766                 ipr_err_separator;
1767
1768                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1769                 ipr_log_vpd(&dev_entry->vpd);
1770
1771                 ipr_err("-----New Device Information-----\n");
1772                 ipr_log_vpd(&dev_entry->new_vpd);
1773
1774                 ipr_err("Cache Directory Card Information:\n");
1775                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1776
1777                 ipr_err("Adapter Card Information:\n");
1778                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1779
1780                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1781                         be32_to_cpu(dev_entry->ioa_data[0]),
1782                         be32_to_cpu(dev_entry->ioa_data[1]),
1783                         be32_to_cpu(dev_entry->ioa_data[2]),
1784                         be32_to_cpu(dev_entry->ioa_data[3]),
1785                         be32_to_cpu(dev_entry->ioa_data[4]));
1786         }
1787 }
1788
1789 /**
1790  * ipr_log_enhanced_array_error - Log an array configuration error.
1791  * @ioa_cfg:    ioa config struct
1792  * @hostrcb:    hostrcb struct
1793  *
1794  * Return value:
1795  *      none
1796  **/
1797 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1798                                          struct ipr_hostrcb *hostrcb)
1799 {
1800         int i, num_entries;
1801         struct ipr_hostrcb_type_14_error *error;
1802         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1803         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1804
1805         error = &hostrcb->hcam.u.error.u.type_14_error;
1806
1807         ipr_err_separator;
1808
1809         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1810                 error->protection_level,
1811                 ioa_cfg->host->host_no,
1812                 error->last_func_vset_res_addr.bus,
1813                 error->last_func_vset_res_addr.target,
1814                 error->last_func_vset_res_addr.lun);
1815
1816         ipr_err_separator;
1817
1818         array_entry = error->array_member;
1819         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1820                             ARRAY_SIZE(error->array_member));
1821
1822         for (i = 0; i < num_entries; i++, array_entry++) {
1823                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1824                         continue;
1825
1826                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1827                         ipr_err("Exposed Array Member %d:\n", i);
1828                 else
1829                         ipr_err("Array Member %d:\n", i);
1830
1831                 ipr_log_ext_vpd(&array_entry->vpd);
1832                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1833                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1834                                  "Expected Location");
1835
1836                 ipr_err_separator;
1837         }
1838 }
1839
1840 /**
1841  * ipr_log_array_error - Log an array configuration error.
1842  * @ioa_cfg:    ioa config struct
1843  * @hostrcb:    hostrcb struct
1844  *
1845  * Return value:
1846  *      none
1847  **/
1848 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1849                                 struct ipr_hostrcb *hostrcb)
1850 {
1851         int i;
1852         struct ipr_hostrcb_type_04_error *error;
1853         struct ipr_hostrcb_array_data_entry *array_entry;
1854         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1855
1856         error = &hostrcb->hcam.u.error.u.type_04_error;
1857
1858         ipr_err_separator;
1859
1860         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1861                 error->protection_level,
1862                 ioa_cfg->host->host_no,
1863                 error->last_func_vset_res_addr.bus,
1864                 error->last_func_vset_res_addr.target,
1865                 error->last_func_vset_res_addr.lun);
1866
1867         ipr_err_separator;
1868
1869         array_entry = error->array_member;
1870
1871         for (i = 0; i < 18; i++) {
1872                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1873                         continue;
1874
1875                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1876                         ipr_err("Exposed Array Member %d:\n", i);
1877                 else
1878                         ipr_err("Array Member %d:\n", i);
1879
1880                 ipr_log_vpd(&array_entry->vpd);
1881
1882                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1883                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1884                                  "Expected Location");
1885
1886                 ipr_err_separator;
1887
1888                 if (i == 9)
1889                         array_entry = error->array_member2;
1890                 else
1891                         array_entry++;
1892         }
1893 }
1894
1895 /**
1896  * ipr_log_hex_data - Log additional hex IOA error data.
1897  * @ioa_cfg:    ioa config struct
1898  * @data:               IOA error data
1899  * @len:                data length
1900  *
1901  * Return value:
1902  *      none
1903  **/
1904 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1905 {
1906         int i;
1907
1908         if (len == 0)
1909                 return;
1910
1911         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1912                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1913
1914         for (i = 0; i < len / 4; i += 4) {
1915                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1916                         be32_to_cpu(data[i]),
1917                         be32_to_cpu(data[i+1]),
1918                         be32_to_cpu(data[i+2]),
1919                         be32_to_cpu(data[i+3]));
1920         }
1921 }
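/*
 * Illustrative output (editorial, values invented): the dump prints four
 * big-endian words per line, prefixed with the hex byte offset, e.g.:
 *
 *	00000000: 04448500 00000000 00001234 00000000
 *	00000010: DEADBEEF 00000001 00000000 00000002
 */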
1922
1923 /**
1924  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1925  * @ioa_cfg:    ioa config struct
1926  * @hostrcb:    hostrcb struct
1927  *
1928  * Return value:
1929  *      none
1930  **/
1931 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1932                                             struct ipr_hostrcb *hostrcb)
1933 {
1934         struct ipr_hostrcb_type_17_error *error;
1935
1936         if (ioa_cfg->sis64)
1937                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1938         else
1939                 error = &hostrcb->hcam.u.error.u.type_17_error;
1940
1941         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1942         strim(error->failure_reason);
1943
1944         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1945                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1946         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1947         ipr_log_hex_data(ioa_cfg, error->data,
1948                          be32_to_cpu(hostrcb->hcam.length) -
1949                          (offsetof(struct ipr_hostrcb_error, u) +
1950                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1951 }
1952
1953 /**
1954  * ipr_log_dual_ioa_error - Log a dual adapter error.
1955  * @ioa_cfg:    ioa config struct
1956  * @hostrcb:    hostrcb struct
1957  *
1958  * Return value:
1959  *      none
1960  **/
1961 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1962                                    struct ipr_hostrcb *hostrcb)
1963 {
1964         struct ipr_hostrcb_type_07_error *error;
1965
1966         error = &hostrcb->hcam.u.error.u.type_07_error;
1967         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1968         strim(error->failure_reason);
1969
1970         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1971                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1972         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1973         ipr_log_hex_data(ioa_cfg, error->data,
1974                          be32_to_cpu(hostrcb->hcam.length) -
1975                          (offsetof(struct ipr_hostrcb_error, u) +
1976                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1977 }
1978
1979 static const struct {
1980         u8 active;
1981         char *desc;
1982 } path_active_desc[] = {
1983         { IPR_PATH_NO_INFO, "Path" },
1984         { IPR_PATH_ACTIVE, "Active path" },
1985         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1986 };
1987
1988 static const struct {
1989         u8 state;
1990         char *desc;
1991 } path_state_desc[] = {
1992         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1993         { IPR_PATH_HEALTHY, "is healthy" },
1994         { IPR_PATH_DEGRADED, "is degraded" },
1995         { IPR_PATH_FAILED, "is failed" }
1996 };
1997
1998 /**
1999  * ipr_log_fabric_path - Log a fabric path error
2000  * @hostrcb:    hostrcb struct
2001  * @fabric:             fabric descriptor
2002  *
2003  * Return value:
2004  *      none
2005  **/
2006 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2007                                 struct ipr_hostrcb_fabric_desc *fabric)
2008 {
2009         int i, j;
2010         u8 path_state = fabric->path_state;
2011         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2012         u8 state = path_state & IPR_PATH_STATE_MASK;
2013
2014         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2015                 if (path_active_desc[i].active != active)
2016                         continue;
2017
2018                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2019                         if (path_state_desc[j].state != state)
2020                                 continue;
2021
2022                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2023                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2024                                              path_active_desc[i].desc, path_state_desc[j].desc,
2025                                              fabric->ioa_port);
2026                         } else if (fabric->cascaded_expander == 0xff) {
2027                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2028                                              path_active_desc[i].desc, path_state_desc[j].desc,
2029                                              fabric->ioa_port, fabric->phy);
2030                         } else if (fabric->phy == 0xff) {
2031                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2032                                              path_active_desc[i].desc, path_state_desc[j].desc,
2033                                              fabric->ioa_port, fabric->cascaded_expander);
2034                         } else {
2035                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2036                                              path_active_desc[i].desc, path_state_desc[j].desc,
2037                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2038                         }
2039                         return;
2040                 }
2041         }
2042
2043         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2044                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2045 }
2046
2047 /**
2048  * ipr_log64_fabric_path - Log a fabric path error
2049  * @hostrcb:    hostrcb struct
2050  * @fabric:             fabric descriptor
2051  *
2052  * Return value:
2053  *      none
2054  **/
2055 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2056                                   struct ipr_hostrcb64_fabric_desc *fabric)
2057 {
2058         int i, j;
2059         u8 path_state = fabric->path_state;
2060         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2061         u8 state = path_state & IPR_PATH_STATE_MASK;
2062         char buffer[IPR_MAX_RES_PATH_LENGTH];
2063
2064         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2065                 if (path_active_desc[i].active != active)
2066                         continue;
2067
2068                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2069                         if (path_state_desc[j].state != state)
2070                                 continue;
2071
2072                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2073                                      path_active_desc[i].desc, path_state_desc[j].desc,
2074                                      ipr_format_res_path(hostrcb->ioa_cfg,
2075                                                 fabric->res_path,
2076                                                 buffer, sizeof(buffer)));
2077                         return;
2078                 }
2079         }
2080
2081         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2082                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2083                                     buffer, sizeof(buffer)));
2084 }
2085
2086 static const struct {
2087         u8 type;
2088         char *desc;
2089 } path_type_desc[] = {
2090         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2091         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2092         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2093         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2094 };
2095
2096 static const struct {
2097         u8 status;
2098         char *desc;
2099 } path_status_desc[] = {
2100         { IPR_PATH_CFG_NO_PROB, "Functional" },
2101         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2102         { IPR_PATH_CFG_FAILED, "Failed" },
2103         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2104         { IPR_PATH_NOT_DETECTED, "Missing" },
2105         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2106 };
2107
2108 static const char *link_rate[] = {
2109         "unknown",
2110         "disabled",
2111         "phy reset problem",
2112         "spinup hold",
2113         "port selector",
2114         "unknown",
2115         "unknown",
2116         "unknown",
2117         "1.5Gbps",
2118         "3.0Gbps",
2119         "unknown",
2120         "unknown",
2121         "unknown",
2122         "unknown",
2123         "unknown",
2124         "unknown"
2125 };
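/*
 * Editorial note: link_rate[] is indexed by the low bits of
 * cfg->link_rate after masking with IPR_PHY_LINK_RATE_MASK, which is why
 * the table has all 16 slots even though only a few encodings (matching
 * the SAS negotiated link rate values, e.g. 0x8 = 1.5Gbps, 0x9 = 3.0Gbps)
 * are meaningful.
 */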
2126
2127 /**
2128  * ipr_log_path_elem - Log a fabric path element.
2129  * @hostrcb:    hostrcb struct
2130  * @cfg:                fabric path element struct
2131  *
2132  * Return value:
2133  *      none
2134  **/
2135 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2136                               struct ipr_hostrcb_config_element *cfg)
2137 {
2138         int i, j;
2139         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2140         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2141
2142         if (type == IPR_PATH_CFG_NOT_EXIST)
2143                 return;
2144
2145         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2146                 if (path_type_desc[i].type != type)
2147                         continue;
2148
2149                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2150                         if (path_status_desc[j].status != status)
2151                                 continue;
2152
2153                         if (type == IPR_PATH_CFG_IOA_PORT) {
2154                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2155                                              path_status_desc[j].desc, path_type_desc[i].desc,
2156                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2157                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2158                         } else {
2159                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2160                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2161                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2162                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2163                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2164                                 } else if (cfg->cascaded_expander == 0xff) {
2165                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2166                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2167                                                      path_type_desc[i].desc, cfg->phy,
2168                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2169                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2170                                 } else if (cfg->phy == 0xff) {
2171                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2172                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2173                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2174                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2175                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2176                                 } else {
2177                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2178                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2179                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2180                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2181                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2182                                 }
2183                         }
2184                         return;
2185                 }
2186         }
2187
2188         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2189                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2190                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2191                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2192 }
2193
2194 /**
2195  * ipr_log64_path_elem - Log a fabric path element.
2196  * @hostrcb:    hostrcb struct
2197  * @cfg:                fabric path element struct
2198  *
2199  * Return value:
2200  *      none
2201  **/
2202 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2203                                 struct ipr_hostrcb64_config_element *cfg)
2204 {
2205         int i, j;
2206         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2207         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2208         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2209         char buffer[IPR_MAX_RES_PATH_LENGTH];
2210
2211         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2212                 return;
2213
2214         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2215                 if (path_type_desc[i].type != type)
2216                         continue;
2217
2218                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2219                         if (path_status_desc[j].status != status)
2220                                 continue;
2221
2222                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2223                                      path_status_desc[j].desc, path_type_desc[i].desc,
2224                                      ipr_format_res_path(hostrcb->ioa_cfg,
2225                                         cfg->res_path, buffer, sizeof(buffer)),
2226                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2227                                         be32_to_cpu(cfg->wwid[0]),
2228                                         be32_to_cpu(cfg->wwid[1]));
2229                         return;
2230                 }
2231         }
2232         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2233                      "WWN=%08X%08X\n", cfg->type_status,
2234                      ipr_format_res_path(hostrcb->ioa_cfg,
2235                         cfg->res_path, buffer, sizeof(buffer)),
2236                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2237                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2238 }
2239
2240 /**
2241  * ipr_log_fabric_error - Log a fabric error.
2242  * @ioa_cfg:    ioa config struct
2243  * @hostrcb:    hostrcb struct
2244  *
2245  * Return value:
2246  *      none
2247  **/
2248 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2249                                  struct ipr_hostrcb *hostrcb)
2250 {
2251         struct ipr_hostrcb_type_20_error *error;
2252         struct ipr_hostrcb_fabric_desc *fabric;
2253         struct ipr_hostrcb_config_element *cfg;
2254         int i, add_len;
2255
2256         error = &hostrcb->hcam.u.error.u.type_20_error;
2257         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2258         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2259
2260         add_len = be32_to_cpu(hostrcb->hcam.length) -
2261                 (offsetof(struct ipr_hostrcb_error, u) +
2262                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2263
2264         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2265                 ipr_log_fabric_path(hostrcb, fabric);
2266                 for_each_fabric_cfg(fabric, cfg)
2267                         ipr_log_path_elem(hostrcb, cfg);
2268
2269                 add_len -= be16_to_cpu(fabric->length);
2270                 fabric = (struct ipr_hostrcb_fabric_desc *)
2271                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2272         }
2273
2274         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2275 }
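/*
 * Illustrative sketch (editorial, hypothetical types): the fabric
 * descriptors above are variable-length records packed back to back,
 * each led by a big-endian length, so the walk amounts to:
 *
 *	struct rec { __be16 length; };	// stand-in for the descriptor
 *	struct rec *r = first;
 *
 *	for (i = 0; i < num_entries; i++) {
 *		handle(r);
 *		remaining -= be16_to_cpu(r->length);
 *		r = (struct rec *)((unsigned long)r + be16_to_cpu(r->length));
 *	}
 *
 * Whatever trails the last descriptor is dumped as raw hex.
 */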
2276
2277 /**
2278  * ipr_log_sis64_array_error - Log a sis64 array error.
2279  * @ioa_cfg:    ioa config struct
2280  * @hostrcb:    hostrcb struct
2281  *
2282  * Return value:
2283  *      none
2284  **/
2285 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2286                                       struct ipr_hostrcb *hostrcb)
2287 {
2288         int i, num_entries;
2289         struct ipr_hostrcb_type_24_error *error;
2290         struct ipr_hostrcb64_array_data_entry *array_entry;
2291         char buffer[IPR_MAX_RES_PATH_LENGTH];
2292         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2293
2294         error = &hostrcb->hcam.u.error64.u.type_24_error;
2295
2296         ipr_err_separator;
2297
2298         ipr_err("RAID %s Array Configuration: %s\n",
2299                 error->protection_level,
2300                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2301                         buffer, sizeof(buffer)));
2302
2303         ipr_err_separator;
2304
2305         array_entry = error->array_member;
2306         num_entries = min_t(u32, error->num_entries,
2307                             ARRAY_SIZE(error->array_member));
2308
2309         for (i = 0; i < num_entries; i++, array_entry++) {
2310
2311                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2312                         continue;
2313
2314                 if (error->exposed_mode_adn == i)
2315                         ipr_err("Exposed Array Member %d:\n", i);
2316                 else
2317                         ipr_err("Array Member %d:\n", i);
2318
2319                 ipr_err("Array Member %d:\n", i);
2320                 ipr_log_ext_vpd(&array_entry->vpd);
2321                 ipr_err("Current Location: %s\n",
2322                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2323                                 buffer, sizeof(buffer)));
2324                 ipr_err("Expected Location: %s\n",
2325                          ipr_format_res_path(ioa_cfg,
2326                                 array_entry->expected_res_path,
2327                                 buffer, sizeof(buffer)));
2328
2329                 ipr_err_separator;
2330         }
2331 }
2332
2333 /**
2334  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2335  * @ioa_cfg:    ioa config struct
2336  * @hostrcb:    hostrcb struct
2337  *
2338  * Return value:
2339  *      none
2340  **/
2341 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2342                                        struct ipr_hostrcb *hostrcb)
2343 {
2344         struct ipr_hostrcb_type_30_error *error;
2345         struct ipr_hostrcb64_fabric_desc *fabric;
2346         struct ipr_hostrcb64_config_element *cfg;
2347         int i, add_len;
2348
2349         error = &hostrcb->hcam.u.error64.u.type_30_error;
2350
2351         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2352         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2353
2354         add_len = be32_to_cpu(hostrcb->hcam.length) -
2355                 (offsetof(struct ipr_hostrcb64_error, u) +
2356                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2357
2358         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2359                 ipr_log64_fabric_path(hostrcb, fabric);
2360                 for_each_fabric_cfg(fabric, cfg)
2361                         ipr_log64_path_elem(hostrcb, cfg);
2362
2363                 add_len -= be16_to_cpu(fabric->length);
2364                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2365                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2366         }
2367
2368         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2369 }
2370
2371 /**
2372  * ipr_log_generic_error - Log an adapter error.
2373  * @ioa_cfg:    ioa config struct
2374  * @hostrcb:    hostrcb struct
2375  *
2376  * Return value:
2377  *      none
2378  **/
2379 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2380                                   struct ipr_hostrcb *hostrcb)
2381 {
2382         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2383                          be32_to_cpu(hostrcb->hcam.length));
2384 }
2385
2386 /**
2387  * ipr_log_sis64_device_error - Log a sis64 device error.
2388  * @ioa_cfg:    ioa config struct
2389  * @hostrcb:    hostrcb struct
2390  *
2391  * Return value:
2392  *      none
2393  **/
2394 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2395                                          struct ipr_hostrcb *hostrcb)
2396 {
2397         struct ipr_hostrcb_type_21_error *error;
2398         char buffer[IPR_MAX_RES_PATH_LENGTH];
2399
2400         error = &hostrcb->hcam.u.error64.u.type_21_error;
2401
2402         ipr_err("-----Failing Device Information-----\n");
2403         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2404                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2405                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2406         ipr_err("Device Resource Path: %s\n",
2407                 __ipr_format_res_path(error->res_path,
2408                                       buffer, sizeof(buffer)));
2409         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2410         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2411         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2412         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2413         ipr_err("SCSI Sense Data:\n");
2414         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2415         ipr_err("SCSI Command Descriptor Block: \n");
2416         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2417
2418         ipr_err("Additional IOA Data:\n");
2419         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2420 }
2421
2422 /**
2423  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2424  * @ioasc:      IOASC
2425  *
2426  * This function will return the index into the ipr_error_table
2427  * for the specified IOASC. If the IOASC is not in the table,
2428  * 0 will be returned, which points to the entry used for unknown errors.
2429  *
2430  * Return value:
2431  *      index into the ipr_error_table
2432  **/
2433 static u32 ipr_get_error(u32 ioasc)
2434 {
2435         int i;
2436
2437         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2438                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2439                         return i;
2440
2441         return 0;
2442 }
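/*
 * Usage note (editorial): the IOASC is masked before the compare, so
 * related IOASCs that differ only in the bits cleared by
 * IPR_IOASC_IOASC_MASK share one table entry, and callers use the
 * result like:
 *
 *	error_index = ipr_get_error(ioasc);	// 0 == unknown error entry
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n",
 *			     ipr_error_table[error_index].error);
 */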
2443
2444 /**
2445  * ipr_handle_log_data - Log an adapter error.
2446  * @ioa_cfg:    ioa config struct
2447  * @hostrcb:    hostrcb struct
2448  *
2449  * This function logs an adapter error to the system.
2450  *
2451  * Return value:
2452  *      none
2453  **/
2454 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2455                                 struct ipr_hostrcb *hostrcb)
2456 {
2457         u32 ioasc;
2458         int error_index;
2459         struct ipr_hostrcb_type_21_error *error;
2460
2461         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2462                 return;
2463
2464         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2465                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2466
2467         if (ioa_cfg->sis64)
2468                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2469         else
2470                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2471
2472         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2473             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2474                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2475                 scsi_report_bus_reset(ioa_cfg->host,
2476                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2477         }
2478
2479         error_index = ipr_get_error(ioasc);
2480
2481         if (!ipr_error_table[error_index].log_hcam)
2482                 return;
2483
2484         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2485             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2486                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2487
2488                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2489                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2490                         return;
2491         }
2492
2493         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2494
2495         /* Set indication we have logged an error */
2496         ioa_cfg->errors_logged++;
2497
2498         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2499                 return;
2500         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2501                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2502
2503         switch (hostrcb->hcam.overlay_id) {
2504         case IPR_HOST_RCB_OVERLAY_ID_2:
2505                 ipr_log_cache_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_3:
2508                 ipr_log_config_error(ioa_cfg, hostrcb);
2509                 break;
2510         case IPR_HOST_RCB_OVERLAY_ID_4:
2511         case IPR_HOST_RCB_OVERLAY_ID_6:
2512                 ipr_log_array_error(ioa_cfg, hostrcb);
2513                 break;
2514         case IPR_HOST_RCB_OVERLAY_ID_7:
2515                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2516                 break;
2517         case IPR_HOST_RCB_OVERLAY_ID_12:
2518                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_13:
2521                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_14:
2524         case IPR_HOST_RCB_OVERLAY_ID_16:
2525                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2526                 break;
2527         case IPR_HOST_RCB_OVERLAY_ID_17:
2528                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2529                 break;
2530         case IPR_HOST_RCB_OVERLAY_ID_20:
2531                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2532                 break;
2533         case IPR_HOST_RCB_OVERLAY_ID_21:
2534                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2535                 break;
2536         case IPR_HOST_RCB_OVERLAY_ID_23:
2537                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2538                 break;
2539         case IPR_HOST_RCB_OVERLAY_ID_24:
2540         case IPR_HOST_RCB_OVERLAY_ID_26:
2541                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2542                 break;
2543         case IPR_HOST_RCB_OVERLAY_ID_30:
2544                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2545                 break;
2546         case IPR_HOST_RCB_OVERLAY_ID_1:
2547         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2548         default:
2549                 ipr_log_generic_error(ioa_cfg, hostrcb);
2550                 break;
2551         }
2552 }
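/*
 * Editorial note: the overlay id is the adapter's discriminator for the
 * hostrcb error union, so any overlay this driver does not recognize
 * degrades gracefully to a raw hex dump via ipr_log_generic_error().
 */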
2553
2554 /**
2555  * ipr_process_error - Op done function for an adapter error log.
2556  * @ipr_cmd:    ipr command struct
2557  *
2558  * This function is the op done function for an error log HCAM
2559  * (host controlled asynchronous message) from the adapter. It will
2560  * log the error and send the HCAM back to the adapter.
2561  *
2562  * Return value:
2563  *      none
2564  **/
2565 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2566 {
2567         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2568         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2569         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2570         u32 fd_ioasc;
2571
2572         if (ioa_cfg->sis64)
2573                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2574         else
2575                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2576
2577         list_del(&hostrcb->queue);
2578         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2579
2580         if (!ioasc) {
2581                 ipr_handle_log_data(ioa_cfg, hostrcb);
2582                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2583                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2584         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2585                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2586                 dev_err(&ioa_cfg->pdev->dev,
2587                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2588         }
2589
2590         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2591 }
2592
2593 /**
2594  * ipr_timeout - An internally generated op has timed out.
2595  * @ipr_cmd:    ipr command struct
2596  *
2597  * This function blocks host requests and initiates an
2598  * adapter reset.
2599  *
2600  * Return value:
2601  *      none
2602  **/
2603 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2604 {
2605         unsigned long lock_flags = 0;
2606         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2607
2608         ENTER;
2609         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2610
2611         ioa_cfg->errors_logged++;
2612         dev_err(&ioa_cfg->pdev->dev,
2613                 "Adapter being reset due to command timeout.\n");
2614
2615         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2616                 ioa_cfg->sdt_state = GET_DUMP;
2617
2618         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2619                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2620
2621         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2622         LEAVE;
2623 }
2624
2625 /**
2626  * ipr_oper_timeout - Adapter timed out transitioning to operational
2627  * @ipr_cmd:    ipr command struct
2628  *
2629  * This function blocks host requests and initiates an
2630  * adapter reset.
2631  *
2632  * Return value:
2633  *      none
2634  **/
2635 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2636 {
2637         unsigned long lock_flags = 0;
2638         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2639
2640         ENTER;
2641         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2642
2643         ioa_cfg->errors_logged++;
2644         dev_err(&ioa_cfg->pdev->dev,
2645                 "Adapter timed out transitioning to operational.\n");
2646
2647         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2648                 ioa_cfg->sdt_state = GET_DUMP;
2649
2650         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2651                 if (ipr_fastfail)
2652                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2653                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2654         }
2655
2656         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2657         LEAVE;
2658 }
2659
2660 /**
2661  * ipr_find_ses_entry - Find matching SES in SES table
2662  * @res:        resource entry struct of SES
2663  *
2664  * Return value:
2665  *      pointer to SES table entry / NULL on failure
2666  **/
2667 static const struct ipr_ses_table_entry *
2668 ipr_find_ses_entry(struct ipr_resource_entry *res)
2669 {
2670         int i, j, matches;
2671         struct ipr_std_inq_vpids *vpids;
2672         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2673
2674         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2675                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2676                         if (ste->compare_product_id_byte[j] == 'X') {
2677                                 vpids = &res->std_inq_data.vpids;
2678                                 if (vpids->product_id[j] == ste->product_id[j])
2679                                         matches++;
2680                                 else
2681                                         break;
2682                         } else
2683                                 matches++;
2684                 }
2685
2686                 if (matches == IPR_PROD_ID_LEN)
2687                         return ste;
2688         }
2689
2690         return NULL;
2691 }
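
/*
 * A note on the matching loop above: a byte of 'X' in
 * ste->compare_product_id_byte marks a position of the product id
 * that must match the device's INQUIRY data exactly; any other byte
 * in the compare mask is a don't-care and counts as a match
 * automatically.
 */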
2692
2693 /**
2694  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2695  * @ioa_cfg:    ioa config struct
2696  * @bus:                SCSI bus
2697  * @bus_width:  bus width
2698  *
2699  * Return value:
2700  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2701  *      For a 2-byte wide SCSI bus, the maximum transfer rate is
2702  *      twice the bus speed (e.g. for a wide enabled bus,
2703  *      max 160MHz = max 320MB/sec).
2704  **/
2705 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2706 {
2707         struct ipr_resource_entry *res;
2708         const struct ipr_ses_table_entry *ste;
2709         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2710
2711         /* Loop through each config table entry in the config table buffer */
2712         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2713                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2714                         continue;
2715
2716                 if (bus != res->bus)
2717                         continue;
2718
2719                 if (!(ste = ipr_find_ses_entry(res)))
2720                         continue;
2721
2722                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2723         }
2724
2725         return max_xfer_rate;
2726 }
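
/*
 * Worked example of the conversion above: a (hypothetical) SES entry
 * with max_bus_speed_limit = 160 MB/sec on a 16-bit wide bus yields
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100KHz units this
 * function returns.
 */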
2727
2728 /**
2729  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2730  * @ioa_cfg:            ioa config struct
2731  * @max_delay:          max delay in micro-seconds to wait
2732  *
2733  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2734  *
2735  * Return value:
2736  *      0 on success / other on failure
2737  **/
2738 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2739 {
2740         volatile u32 pcii_reg;
2741         int delay = 1;
2742
2743         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2744         while (delay < max_delay) {
2745                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2746
2747                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2748                         return 0;
2749
2750                 /* udelay cannot be used if delay is more than a few milliseconds */
2751                 if ((delay / 1000) > MAX_UDELAY_MS)
2752                         mdelay(delay / 1000);
2753                 else
2754                         udelay(delay);
2755
2756                 delay += delay;
2757         }
2758         return -EIO;
2759 }
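
/*
 * The wait above backs off exponentially: the busy-wait delay doubles
 * on each pass (1, 2, 4, ... microseconds), so the total time spent
 * before giving up is bounded by roughly 2 * max_delay microseconds.
 */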
2760
2761 /**
2762  * ipr_get_sis64_dump_data_section - Dump IOA memory
2763  * @ioa_cfg:                    ioa config struct
2764  * @start_addr:                 adapter address to dump
2765  * @dest:                       destination kernel buffer
2766  * @length_in_words:            length to dump in 4 byte words
2767  *
2768  * Return value:
2769  *      0 on success
2770  **/
2771 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2772                                            u32 start_addr,
2773                                            __be32 *dest, u32 length_in_words)
2774 {
2775         int i;
2776
2777         for (i = 0; i < length_in_words; i++) {
2778                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2779                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2780                 dest++;
2781         }
2782
2783         return 0;
2784 }
2785
2786 /**
2787  * ipr_get_ldump_data_section - Dump IOA memory
2788  * @ioa_cfg:                    ioa config struct
2789  * @start_addr:                 adapter address to dump
2790  * @dest:                       destination kernel buffer
2791  * @length_in_words:            length to dump in 4 byte words
2792  *
2793  * Return value:
2794  *      0 on success / -EIO on failure
2795  **/
2796 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2797                                       u32 start_addr,
2798                                       __be32 *dest, u32 length_in_words)
2799 {
2800         volatile u32 temp_pcii_reg;
2801         int i, delay = 0;
2802
2803         if (ioa_cfg->sis64)
2804                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2805                                                        dest, length_in_words);
2806
2807         /* Write IOA interrupt reg starting LDUMP state  */
2808         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2809                ioa_cfg->regs.set_uproc_interrupt_reg32);
2810
2811         /* Wait for IO debug acknowledge */
2812         if (ipr_wait_iodbg_ack(ioa_cfg,
2813                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2814                 dev_err(&ioa_cfg->pdev->dev,
2815                         "IOA dump long data transfer timeout\n");
2816                 return -EIO;
2817         }
2818
2819         /* Signal LDUMP interlocked - clear IO debug ack */
2820         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2821                ioa_cfg->regs.clr_interrupt_reg);
2822
2823         /* Write Mailbox with starting address */
2824         writel(start_addr, ioa_cfg->ioa_mailbox);
2825
2826         /* Signal address valid - clear IOA Reset alert */
2827         writel(IPR_UPROCI_RESET_ALERT,
2828                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2829
2830         for (i = 0; i < length_in_words; i++) {
2831                 /* Wait for IO debug acknowledge */
2832                 if (ipr_wait_iodbg_ack(ioa_cfg,
2833                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2834                         dev_err(&ioa_cfg->pdev->dev,
2835                                 "IOA dump short data transfer timeout\n");
2836                         return -EIO;
2837                 }
2838
2839                 /* Read data from mailbox and increment destination pointer */
2840                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2841                 dest++;
2842
2843                 /* For all but the last word of data, signal data received */
2844                 if (i < (length_in_words - 1)) {
2845                         /* Signal dump data received - Clear IO debug Ack */
2846                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2847                                ioa_cfg->regs.clr_interrupt_reg);
2848                 }
2849         }
2850
2851         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2852         writel(IPR_UPROCI_RESET_ALERT,
2853                ioa_cfg->regs.set_uproc_interrupt_reg32);
2854
2855         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2856                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2857
2858         /* Signal dump data received - Clear IO debug Ack */
2859         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2860                ioa_cfg->regs.clr_interrupt_reg);
2861
2862         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2863         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2864                 temp_pcii_reg =
2865                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2866
2867                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2868                         return 0;
2869
2870                 udelay(10);
2871                 delay += 10;
2872         }
2873
2874         return 0;
2875 }
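
/*
 * For reference, the fmt2 LDUMP handshake implemented above is:
 *   1. Raise RESET_ALERT and IO_DEBUG_ALERT to put the IOA in LDUMP
 *      state, then wait for IO_DEBUG_ACKNOWLEDGE.
 *   2. Clear the ack, write the start address to the mailbox, and
 *      clear RESET_ALERT to signal the address is valid.
 *   3. For each word: wait for the ack, read the mailbox, and clear
 *      the ack (except after the final word).
 *   4. Set RESET_ALERT, clear IO_DEBUG_ALERT and the final ack, then
 *      poll until the IOA drops RESET_ALERT to confirm LDUMP exit.
 */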
2876
2877 #ifdef CONFIG_SCSI_IPR_DUMP
2878 /**
2879  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2880  * @ioa_cfg:            ioa config struct
2881  * @pci_address:        adapter address
2882  * @length:             length of data to copy
2883  *
2884  * Copy data from PCI adapter to kernel buffer.
2885  * Note: length MUST be a 4 byte multiple
2886  * Return value:
2887  *      0 on success / other on failure
2888  **/
2889 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2890                         unsigned long pci_address, u32 length)
2891 {
2892         int bytes_copied = 0;
2893         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2894         __be32 *page;
2895         unsigned long lock_flags = 0;
2896         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2897
2898         if (ioa_cfg->sis64)
2899                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2900         else
2901                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2902
2903         while (bytes_copied < length &&
2904                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2905                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2906                     ioa_dump->page_offset == 0) {
2907                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2908
2909                         if (!page) {
2910                                 ipr_trace;
2911                                 return bytes_copied;
2912                         }
2913
2914                         ioa_dump->page_offset = 0;
2915                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2916                         ioa_dump->next_page_index++;
2917                 } else
2918                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2919
2920                 rem_len = length - bytes_copied;
2921                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2922                 cur_len = min(rem_len, rem_page_len);
2923
2924                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2925                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2926                         rc = -EIO;
2927                 } else {
2928                         rc = ipr_get_ldump_data_section(ioa_cfg,
2929                                                         pci_address + bytes_copied,
2930                                                         &page[ioa_dump->page_offset / 4],
2931                                                         (cur_len / sizeof(u32)));
2932                 }
2933                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2934
2935                 if (!rc) {
2936                         ioa_dump->page_offset += cur_len;
2937                         bytes_copied += cur_len;
2938                 } else {
2939                         ipr_trace;
2940                         break;
2941                 }
2942                 schedule();
2943         }
2944
2945         return bytes_copied;
2946 }
2947
2948 /**
2949  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2950  * @hdr:        dump entry header struct
2951  *
2952  * Return value:
2953  *      nothing
2954  **/
2955 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2956 {
2957         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2958         hdr->num_elems = 1;
2959         hdr->offset = sizeof(*hdr);
2960         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2961 }
2962
2963 /**
2964  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2965  * @ioa_cfg:    ioa config struct
2966  * @driver_dump:        driver dump struct
2967  *
2968  * Return value:
2969  *      nothing
2970  **/
2971 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2972                                    struct ipr_driver_dump *driver_dump)
2973 {
2974         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2975
2976         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2977         driver_dump->ioa_type_entry.hdr.len =
2978                 sizeof(struct ipr_dump_ioa_type_entry) -
2979                 sizeof(struct ipr_dump_entry_header);
2980         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2981         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2982         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2983         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2984                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2985                 ucode_vpd->minor_release[1];
2986         driver_dump->hdr.num_entries++;
2987 }
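
/*
 * Packing example for fw_version above: VPD bytes (major_release,
 * card_type, minor_release[0], minor_release[1]) of
 * (0x02, 0x5E, 0x00, 0x01) pack to 0x025E0001, the same byte order
 * the fw_version sysfs attribute prints with "%02X%02X%02X%02X".
 */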
2988
2989 /**
2990  * ipr_dump_version_data - Fill in the driver version in the dump.
2991  * @ioa_cfg:    ioa config struct
2992  * @driver_dump:        driver dump struct
2993  *
2994  * Return value:
2995  *      nothing
2996  **/
2997 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2998                                   struct ipr_driver_dump *driver_dump)
2999 {
3000         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3001         driver_dump->version_entry.hdr.len =
3002                 sizeof(struct ipr_dump_version_entry) -
3003                 sizeof(struct ipr_dump_entry_header);
3004         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3005         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3006         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3007         driver_dump->hdr.num_entries++;
3008 }
3009
3010 /**
3011  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3012  * @ioa_cfg:    ioa config struct
3013  * @driver_dump:        driver dump struct
3014  *
3015  * Return value:
3016  *      nothing
3017  **/
3018 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3019                                    struct ipr_driver_dump *driver_dump)
3020 {
3021         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3022         driver_dump->trace_entry.hdr.len =
3023                 sizeof(struct ipr_dump_trace_entry) -
3024                 sizeof(struct ipr_dump_entry_header);
3025         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3026         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3027         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3028         driver_dump->hdr.num_entries++;
3029 }
3030
3031 /**
3032  * ipr_dump_location_data - Fill in the IOA location in the dump.
3033  * @ioa_cfg:    ioa config struct
3034  * @driver_dump:        driver dump struct
3035  *
3036  * Return value:
3037  *      nothing
3038  **/
3039 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3040                                    struct ipr_driver_dump *driver_dump)
3041 {
3042         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3043         driver_dump->location_entry.hdr.len =
3044                 sizeof(struct ipr_dump_location_entry) -
3045                 sizeof(struct ipr_dump_entry_header);
3046         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3047         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3048         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3049         driver_dump->hdr.num_entries++;
3050 }
3051
3052 /**
3053  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3054  * @ioa_cfg:    ioa config struct
3055  * @dump:               dump struct
3056  *
3057  * Return value:
3058  *      nothing
3059  **/
3060 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3061 {
3062         unsigned long start_addr, sdt_word;
3063         unsigned long lock_flags = 0;
3064         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3065         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3066         u32 num_entries, max_num_entries, start_off, end_off;
3067         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3068         struct ipr_sdt *sdt;
3069         int valid = 1;
3070         int i;
3071
3072         ENTER;
3073
3074         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3075
3076         if (ioa_cfg->sdt_state != READ_DUMP) {
3077                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3078                 return;
3079         }
3080
3081         if (ioa_cfg->sis64) {
3082                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3083                 ssleep(IPR_DUMP_DELAY_SECONDS);
3084                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3085         }
3086
3087         start_addr = readl(ioa_cfg->ioa_mailbox);
3088
3089         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3090                 dev_err(&ioa_cfg->pdev->dev,
3091                         "Invalid dump table format: %lx\n", start_addr);
3092                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093                 return;
3094         }
3095
3096         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3097
3098         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3099
3100         /* Initialize the overall dump header */
3101         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3102         driver_dump->hdr.num_entries = 1;
3103         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3104         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3105         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3106         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3107
3108         ipr_dump_version_data(ioa_cfg, driver_dump);
3109         ipr_dump_location_data(ioa_cfg, driver_dump);
3110         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3111         ipr_dump_trace_data(ioa_cfg, driver_dump);
3112
3113         /* Update dump_header */
3114         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3115
3116         /* IOA Dump entry */
3117         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3118         ioa_dump->hdr.len = 0;
3119         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3120         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3121
3122         /* First entries in sdt are actually a list of dump addresses and
3123          * lengths to gather the real dump data.  sdt represents the pointer
3124          * to the ioa generated dump table.  Dump data will be extracted based
3125          * on entries in this table */
3126         sdt = &ioa_dump->sdt;
3127
3128         if (ioa_cfg->sis64) {
3129                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3130                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3131         } else {
3132                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3133                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3134         }
3135
3136         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3137                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3138         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3139                                         bytes_to_copy / sizeof(__be32));
3140
3141         /* Abort if we could not fetch the SDT or it is not ready to use */
3142         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3143             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3144                 dev_err(&ioa_cfg->pdev->dev,
3145                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3146                         rc, be32_to_cpu(sdt->hdr.state));
3147                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3148                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3149                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150                 return;
3151         }
3152
3153         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3154
3155         if (num_entries > max_num_entries)
3156                 num_entries = max_num_entries;
3157
3158         /* Update dump length to the actual data to be copied */
3159         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3160         if (ioa_cfg->sis64)
3161                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3162         else
3163                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3164
3165         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3166
3167         for (i = 0; i < num_entries; i++) {
3168                 if (ioa_dump->hdr.len > max_dump_size) {
3169                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3170                         break;
3171                 }
3172
3173                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3174                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3175                         if (ioa_cfg->sis64)
3176                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3177                         else {
3178                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3179                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3180
3181                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3182                                         bytes_to_copy = end_off - start_off;
3183                                 else
3184                                         valid = 0;
3185                         }
3186                         if (valid) {
3187                                 if (bytes_to_copy > max_dump_size) {
3188                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3189                                         continue;
3190                                 }
3191
3192                                 /* Copy data from adapter to driver buffers */
3193                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3194                                                             bytes_to_copy);
3195
3196                                 ioa_dump->hdr.len += bytes_copied;
3197
3198                                 if (bytes_copied != bytes_to_copy) {
3199                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3200                                         break;
3201                                 }
3202                         }
3203                 }
3204         }
3205
3206         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3207
3208         /* Update dump_header */
3209         driver_dump->hdr.len += ioa_dump->hdr.len;
3210         wmb();
3211         ioa_cfg->sdt_state = DUMP_OBTAINED;
3212         LEAVE;
3213 }
3214
3215 #else
3216 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3217 #endif
3218
3219 /**
3220  * ipr_release_dump - Free adapter dump memory
3221  * @kref:       kref struct
3222  *
3223  * Return value:
3224  *      nothing
3225  **/
3226 static void ipr_release_dump(struct kref *kref)
3227 {
3228         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3229         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3230         unsigned long lock_flags = 0;
3231         int i;
3232
3233         ENTER;
3234         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3235         ioa_cfg->dump = NULL;
3236         ioa_cfg->sdt_state = INACTIVE;
3237         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3238
3239         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3240                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3241
3242         vfree(dump->ioa_dump.ioa_data);
3243         kfree(dump);
3244         LEAVE;
3245 }
3246
3247 /**
3248  * ipr_worker_thread - Worker thread
3249  * @work:               work struct
3250  *
3251  * Called at task level from a work thread. This function takes care
3252  * of adding and removing devices from the mid-layer as configuration
3253  * changes are detected by the adapter.
3254  *
3255  * Return value:
3256  *      nothing
3257  **/
3258 static void ipr_worker_thread(struct work_struct *work)
3259 {
3260         unsigned long lock_flags;
3261         struct ipr_resource_entry *res;
3262         struct scsi_device *sdev;
3263         struct ipr_dump *dump;
3264         struct ipr_ioa_cfg *ioa_cfg =
3265                 container_of(work, struct ipr_ioa_cfg, work_q);
3266         u8 bus, target, lun;
3267         int did_work;
3268
3269         ENTER;
3270         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3271
3272         if (ioa_cfg->sdt_state == READ_DUMP) {
3273                 dump = ioa_cfg->dump;
3274                 if (!dump) {
3275                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276                         return;
3277                 }
3278                 kref_get(&dump->kref);
3279                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3280                 ipr_get_ioa_dump(ioa_cfg, dump);
3281                 kref_put(&dump->kref, ipr_release_dump);
3282
3283                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3284                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3285                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3286                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3287                 return;
3288         }
3289
3290 restart:
3291         do {
3292                 did_work = 0;
3293                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3294                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3295                         return;
3296                 }
3297
3298                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3299                         if (res->del_from_ml && res->sdev) {
3300                                 did_work = 1;
3301                                 sdev = res->sdev;
3302                                 if (!scsi_device_get(sdev)) {
3303                                         if (!res->add_to_ml)
3304                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3305                                         else
3306                                                 res->del_from_ml = 0;
3307                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3308                                         scsi_remove_device(sdev);
3309                                         scsi_device_put(sdev);
3310                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3311                                 }
3312                                 break;
3313                         }
3314                 }
3315         } while (did_work);
3316
3317         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3318                 if (res->add_to_ml) {
3319                         bus = res->bus;
3320                         target = res->target;
3321                         lun = res->lun;
3322                         res->add_to_ml = 0;
3323                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3325                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326                         goto restart;
3327                 }
3328         }
3329
3330         ioa_cfg->scan_done = 1;
3331         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3332         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3333         LEAVE;
3334 }
3335
3336 #ifdef CONFIG_SCSI_IPR_TRACE
3337 /**
3338  * ipr_read_trace - Dump the adapter trace
3339  * @filp:               open sysfs file
3340  * @kobj:               kobject struct
3341  * @bin_attr:           bin_attribute struct
3342  * @buf:                buffer
3343  * @off:                offset
3344  * @count:              buffer size
3345  *
3346  * Return value:
3347  *      number of bytes printed to buffer
3348  **/
3349 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3350                               struct bin_attribute *bin_attr,
3351                               char *buf, loff_t off, size_t count)
3352 {
3353         struct device *dev = container_of(kobj, struct device, kobj);
3354         struct Scsi_Host *shost = class_to_shost(dev);
3355         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3356         unsigned long lock_flags = 0;
3357         ssize_t ret;
3358
3359         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3360         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3361                                 IPR_TRACE_SIZE);
3362         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3363
3364         return ret;
3365 }
3366
3367 static struct bin_attribute ipr_trace_attr = {
3368         .attr = {
3369                 .name = "trace",
3370                 .mode = S_IRUGO,
3371         },
3372         .size = 0,
3373         .read = ipr_read_trace,
3374 };
3375 #endif
3376
3377 /**
3378  * ipr_show_fw_version - Show the firmware version
3379  * @dev:        class device struct
3380  * @buf:        buffer
3381  *
3382  * Return value:
3383  *      number of bytes printed to buffer
3384  **/
3385 static ssize_t ipr_show_fw_version(struct device *dev,
3386                                    struct device_attribute *attr, char *buf)
3387 {
3388         struct Scsi_Host *shost = class_to_shost(dev);
3389         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3390         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3391         unsigned long lock_flags = 0;
3392         int len;
3393
3394         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3395         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3396                        ucode_vpd->major_release, ucode_vpd->card_type,
3397                        ucode_vpd->minor_release[0],
3398                        ucode_vpd->minor_release[1]);
3399         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3400         return len;
3401 }
3402
3403 static struct device_attribute ipr_fw_version_attr = {
3404         .attr = {
3405                 .name =         "fw_version",
3406                 .mode =         S_IRUGO,
3407         },
3408         .show = ipr_show_fw_version,
3409 };
3410
3411 /**
3412  * ipr_show_log_level - Show the adapter's error logging level
3413  * @dev:        class device struct
3414  * @buf:        buffer
3415  *
3416  * Return value:
3417  *      number of bytes printed to buffer
3418  **/
3419 static ssize_t ipr_show_log_level(struct device *dev,
3420                                    struct device_attribute *attr, char *buf)
3421 {
3422         struct Scsi_Host *shost = class_to_shost(dev);
3423         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3424         unsigned long lock_flags = 0;
3425         int len;
3426
3427         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3428         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3429         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3430         return len;
3431 }
3432
3433 /**
3434  * ipr_store_log_level - Change the adapter's error logging level
3435  * @dev:        class device struct
3436  * @buf:        buffer
3437  *
3438  * Return value:
3439  *      number of bytes consumed on success
3440  **/
3441 static ssize_t ipr_store_log_level(struct device *dev,
3442                                    struct device_attribute *attr,
3443                                    const char *buf, size_t count)
3444 {
3445         struct Scsi_Host *shost = class_to_shost(dev);
3446         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3447         unsigned long lock_flags = 0;
3448
3449         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3450         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3451         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3452         return strlen(buf);
3453 }
3454
3455 static struct device_attribute ipr_log_level_attr = {
3456         .attr = {
3457                 .name =         "log_level",
3458                 .mode =         S_IRUGO | S_IWUSR,
3459         },
3460         .show = ipr_show_log_level,
3461         .store = ipr_store_log_level
3462 };
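
/*
 * Illustrative use of the attribute above from userspace (a sketch;
 * the host number is hypothetical):
 *
 *      int fd = open("/sys/class/scsi_host/host0/log_level", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "4", 1);      // raise the logging level
 *              close(fd);
 *      }
 */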
3463
3464 /**
3465  * ipr_store_diagnostics - IOA Diagnostics interface
3466  * @dev:        device struct
3467  * @buf:        buffer
3468  * @count:      buffer size
3469  *
3470  * This function will reset the adapter and wait a reasonable
3471  * amount of time for any errors that the adapter might log.
3472  *
3473  * Return value:
3474  *      count on success / other on failure
3475  **/
3476 static ssize_t ipr_store_diagnostics(struct device *dev,
3477                                      struct device_attribute *attr,
3478                                      const char *buf, size_t count)
3479 {
3480         struct Scsi_Host *shost = class_to_shost(dev);
3481         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3482         unsigned long lock_flags = 0;
3483         int rc = count;
3484
3485         if (!capable(CAP_SYS_ADMIN))
3486                 return -EACCES;
3487
3488         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3489         while (ioa_cfg->in_reset_reload) {
3490                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3491                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3492                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3493         }
3494
3495         ioa_cfg->errors_logged = 0;
3496         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3497
3498         if (ioa_cfg->in_reset_reload) {
3499                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3500                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3501
3502                 /* Wait for a second for any errors to be logged */
3503                 msleep(1000);
3504         } else {
3505                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3506                 return -EIO;
3507         }
3508
3509         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3510         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3511                 rc = -EIO;
3512         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3513
3514         return rc;
3515 }
3516
3517 static struct device_attribute ipr_diagnostics_attr = {
3518         .attr = {
3519                 .name =         "run_diagnostics",
3520                 .mode =         S_IWUSR,
3521         },
3522         .store = ipr_store_diagnostics
3523 };
3524
3525 /**
3526  * ipr_show_adapter_state - Show the adapter's state
3527  * @dev:        device struct
3528  * @buf:        buffer
3529  *
3530  * Return value:
3531  *      number of bytes printed to buffer
3532  **/
3533 static ssize_t ipr_show_adapter_state(struct device *dev,
3534                                       struct device_attribute *attr, char *buf)
3535 {
3536         struct Scsi_Host *shost = class_to_shost(dev);
3537         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3538         unsigned long lock_flags = 0;
3539         int len;
3540
3541         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3542         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3543                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3544         else
3545                 len = snprintf(buf, PAGE_SIZE, "online\n");
3546         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3547         return len;
3548 }
3549
3550 /**
3551  * ipr_store_adapter_state - Change adapter state
3552  * @dev:        device struct
3553  * @buf:        buffer
3554  * @count:      buffer size
3555  *
3556  * This function will change the adapter's state.
3557  *
3558  * Return value:
3559  *      count on success / other on failure
3560  **/
3561 static ssize_t ipr_store_adapter_state(struct device *dev,
3562                                        struct device_attribute *attr,
3563                                        const char *buf, size_t count)
3564 {
3565         struct Scsi_Host *shost = class_to_shost(dev);
3566         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3567         unsigned long lock_flags;
3568         int result = count, i;
3569
3570         if (!capable(CAP_SYS_ADMIN))
3571                 return -EACCES;
3572
3573         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3574         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3575             !strncmp(buf, "online", 6)) {
3576                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3577                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3578                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3579                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3580                 }
3581                 wmb();
3582                 ioa_cfg->reset_retries = 0;
3583                 ioa_cfg->in_ioa_bringdown = 0;
3584                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3585         }
3586         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3587         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3588
3589         return result;
3590 }
3591
3592 static struct device_attribute ipr_ioa_state_attr = {
3593         .attr = {
3594                 .name =         "online_state",
3595                 .mode =         S_IRUGO | S_IWUSR,
3596         },
3597         .show = ipr_show_adapter_state,
3598         .store = ipr_store_adapter_state
3599 };
3600
3601 /**
3602  * ipr_store_reset_adapter - Reset the adapter
3603  * @dev:        device struct
3604  * @buf:        buffer
3605  * @count:      buffer size
3606  *
3607  * This function will reset the adapter.
3608  *
3609  * Return value:
3610  *      count on success / other on failure
3611  **/
3612 static ssize_t ipr_store_reset_adapter(struct device *dev,
3613                                        struct device_attribute *attr,
3614                                        const char *buf, size_t count)
3615 {
3616         struct Scsi_Host *shost = class_to_shost(dev);
3617         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3618         unsigned long lock_flags;
3619         int result = count;
3620
3621         if (!capable(CAP_SYS_ADMIN))
3622                 return -EACCES;
3623
3624         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3625         if (!ioa_cfg->in_reset_reload)
3626                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3627         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3628         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3629
3630         return result;
3631 }
3632
3633 static struct device_attribute ipr_ioa_reset_attr = {
3634         .attr = {
3635                 .name =         "reset_host",
3636                 .mode =         S_IWUSR,
3637         },
3638         .store = ipr_store_reset_adapter
3639 };
3640
3641 static int ipr_iopoll(struct irq_poll *iop, int budget);
3642 /**
3643  * ipr_show_iopoll_weight - Show ipr polling mode
3644  * @dev:        class device struct
3645  * @buf:        buffer
3646  *
3647  * Return value:
3648  *      number of bytes printed to buffer
3649  **/
3650 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3651                                    struct device_attribute *attr, char *buf)
3652 {
3653         struct Scsi_Host *shost = class_to_shost(dev);
3654         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3655         unsigned long lock_flags = 0;
3656         int len;
3657
3658         spin_lock_irqsave(shost->host_lock, lock_flags);
3659         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3660         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3661
3662         return len;
3663 }
3664
3665 /**
3666  * ipr_store_iopoll_weight - Change the adapter's polling mode
3667  * @dev:        class device struct
3668  * @buf:        buffer
3669  *
3670  * Return value:
3671  *      number of bytes consumed on success / -EINVAL on failure
3672  **/
3673 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3674                                         struct device_attribute *attr,
3675                                         const char *buf, size_t count)
3676 {
3677         struct Scsi_Host *shost = class_to_shost(dev);
3678         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3679         unsigned long user_iopoll_weight;
3680         unsigned long lock_flags = 0;
3681         int i;
3682
3683         if (!ioa_cfg->sis64) {
3684                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3685                 return -EINVAL;
3686         }
3687         if (kstrtoul(buf, 10, &user_iopoll_weight))
3688                 return -EINVAL;
3689
3690         if (user_iopoll_weight > 256) {
3691                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3692                 return -EINVAL;
3693         }
3694
3695         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3696                 dev_info(&ioa_cfg->pdev->dev, "New irq_poll weight matches the current weight\n");
3697                 return strlen(buf);
3698         }
3699
3700         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3701                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3702                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3703         }
3704
3705         spin_lock_irqsave(shost->host_lock, lock_flags);
3706         ioa_cfg->iopoll_weight = user_iopoll_weight;
3707         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3708                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3709                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3710                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3711                 }
3712         }
3713         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3714
3715         return strlen(buf);
3716 }
3717
3718 static struct device_attribute ipr_iopoll_weight_attr = {
3719         .attr = {
3720                 .name =         "iopoll_weight",
3721                 .mode =         S_IRUGO | S_IWUSR,
3722         },
3723         .show = ipr_show_iopoll_weight,
3724         .store = ipr_store_iopoll_weight
3725 };
3726
3727 /**
3728  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3729  * @buf_len:            buffer length
3730  *
3731  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3732  * list to use for microcode download
3733  *
3734  * Return value:
3735  *      pointer to sglist / NULL on failure
3736  **/
3737 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3738 {
3739         int sg_size, order, bsize_elem, num_elem, i, j;
3740         struct ipr_sglist *sglist;
3741         struct scatterlist *scatterlist;
3742         struct page *page;
3743
3744         /* Get the minimum size per scatter/gather element */
3745         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3746
3747         /* Get the actual size per element */
3748         order = get_order(sg_size);
3749
3750         /* Determine the actual number of bytes per element */
3751         bsize_elem = PAGE_SIZE * (1 << order);
3752
3753         /* Determine the actual number of sg entries needed */
3754         if (buf_len % bsize_elem)
3755                 num_elem = (buf_len / bsize_elem) + 1;
3756         else
3757                 num_elem = buf_len / bsize_elem;
3758
3759         /* Allocate a scatter/gather list for the DMA */
3760         sglist = kzalloc(sizeof(struct ipr_sglist) +
3761                          (sizeof(struct scatterlist) * (num_elem - 1)),
3762                          GFP_KERNEL);
3763
3764         if (sglist == NULL) {
3765                 ipr_trace;
3766                 return NULL;
3767         }
3768
3769         scatterlist = sglist->scatterlist;
3770         sg_init_table(scatterlist, num_elem);
3771
3772         sglist->order = order;
3773         sglist->num_sg = num_elem;
3774
3775         /* Allocate a bunch of sg elements */
3776         for (i = 0; i < num_elem; i++) {
3777                 page = alloc_pages(GFP_KERNEL, order);
3778                 if (!page) {
3779                         ipr_trace;
3780
3781                         /* Free up what we already allocated */
3782                         for (j = i - 1; j >= 0; j--)
3783                                 __free_pages(sg_page(&scatterlist[j]), order);
3784                         kfree(sglist);
3785                         return NULL;
3786                 }
3787
3788                 sg_set_page(&scatterlist[i], page, 0, 0);
3789         }
3790
3791         return sglist;
3792 }
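
/*
 * Sizing example for the math above, assuming 4K pages and an
 * IPR_MAX_SGLIST of 64: a 1 MiB image gives sg_size = 1048576 / 63,
 * roughly 16K; get_order() rounds that up to order 3 (32K chunks),
 * so bsize_elem = 32768 and num_elem = 32 scatter/gather elements.
 */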
3793
3794 /**
3795  * ipr_free_ucode_buffer - Frees a microcode download buffer
3796  * @sglist:             scatter/gather list pointer
3797  *
3798  * Free a DMA'able ucode download buffer previously allocated with
3799  * ipr_alloc_ucode_buffer
3800  *
3801  * Return value:
3802  *      nothing
3803  **/
3804 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3805 {
3806         int i;
3807
3808         for (i = 0; i < sglist->num_sg; i++)
3809                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3810
3811         kfree(sglist);
3812 }
3813
3814 /**
3815  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3816  * @sglist:             scatter/gather list pointer
3817  * @buffer:             buffer pointer
3818  * @len:                buffer length
3819  *
3820  * Copy a microcode image from a user buffer into a buffer allocated by
3821  * ipr_alloc_ucode_buffer
3822  *
3823  * Return value:
3824  *      0 on success / other on failure
3825  **/
3826 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3827                                  u8 *buffer, u32 len)
3828 {
3829         int bsize_elem, i, result = 0;
3830         struct scatterlist *scatterlist;
3831         void *kaddr;
3832
3833         /* Determine the actual number of bytes per element */
3834         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3835
3836         scatterlist = sglist->scatterlist;
3837
3838         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3839                 struct page *page = sg_page(&scatterlist[i]);
3840
3841                 kaddr = kmap(page);
3842                 memcpy(kaddr, buffer, bsize_elem);
3843                 kunmap(page);
3844
3845                 scatterlist[i].length = bsize_elem;
3851         }
3852
3853         if (len % bsize_elem) {
3854                 struct page *page = sg_page(&scatterlist[i]);
3855
3856                 kaddr = kmap(page);
3857                 memcpy(kaddr, buffer, len % bsize_elem);
3858                 kunmap(page);
3859
3860                 scatterlist[i].length = len % bsize_elem;
3861         }
3862
3863         sglist->buffer_len = len;
3864         return result;
3865 }
3866
3867 /**
3868  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3869  * @ipr_cmd:            ipr command struct
3870  * @sglist:             scatter/gather list
3871  *
3872  * Builds a microcode download IOA data list (IOADL).
3873  *
3874  **/
3875 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3876                                     struct ipr_sglist *sglist)
3877 {
3878         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3879         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3880         struct scatterlist *scatterlist = sglist->scatterlist;
3881         int i;
3882
3883         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3884         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3885         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3886
3887         ioarcb->ioadl_len =
3888                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3889         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3890                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3891                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3892                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3893         }
3894
3895         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3896 }
3897
3898 /**
3899  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3900  * @ipr_cmd:    ipr command struct
3901  * @sglist:             scatter/gather list
3902  *
3903  * Builds a microcode download IOA data list (IOADL).
3904  *
3905  **/
3906 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3907                                   struct ipr_sglist *sglist)
3908 {
3909         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3910         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3911         struct scatterlist *scatterlist = sglist->scatterlist;
3912         int i;
3913
3914         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3915         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3916         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3917
3918         ioarcb->ioadl_len =
3919                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3920
3921         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3922                 ioadl[i].flags_and_data_len =
3923                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3924                 ioadl[i].address =
3925                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3926         }
3927
3928         ioadl[i-1].flags_and_data_len |=
3929                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3930 }
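
/*
 * The two IOADL builders above differ only in descriptor layout: the
 * 64-bit form carries separate flags, length, and 64-bit address
 * fields, while the 32-bit form packs the flags and the length into a
 * single big-endian word next to a 32-bit address.  Both mark the
 * final element with IPR_IOADL_FLAGS_LAST.
 */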
3931
3932 /**
3933  * ipr_update_ioa_ucode - Update IOA's microcode
3934  * @ioa_cfg:    ioa config struct
3935  * @sglist:             scatter/gather list
3936  *
3937  * Initiate an adapter reset to update the IOA's microcode
3938  *
3939  * Return value:
3940  *      0 on success / -EIO on failure
3941  **/
3942 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3943                                 struct ipr_sglist *sglist)
3944 {
3945         unsigned long lock_flags;
3946
3947         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3948         while (ioa_cfg->in_reset_reload) {
3949                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3951                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3952         }
3953
3954         if (ioa_cfg->ucode_sglist) {
3955                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3956                 dev_err(&ioa_cfg->pdev->dev,
3957                         "Microcode download already in progress\n");
3958                 return -EIO;
3959         }
3960
3961         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3962                                         sglist->scatterlist, sglist->num_sg,
3963                                         DMA_TO_DEVICE);
3964
3965         if (!sglist->num_dma_sg) {
3966                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3967                 dev_err(&ioa_cfg->pdev->dev,
3968                         "Failed to map microcode download buffer!\n");
3969                 return -EIO;
3970         }
3971
3972         ioa_cfg->ucode_sglist = sglist;
3973         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3974         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3975         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3976
3977         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3978         ioa_cfg->ucode_sglist = NULL;
3979         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3980         return 0;
3981 }
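
/*
 * Note that this function never issues the download itself: it stages
 * the DMA-mapped scatter/gather list in ioa_cfg->ucode_sglist and
 * initiates a normal-shutdown adapter reset, and the reset sequence
 * picks the list up to write the new microcode to the adapter.
 */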
3982
3983 /**
3984  * ipr_store_update_fw - Update the firmware on the adapter
3985  * @dev:        device struct
3986  * @buf:        buffer
3987  * @count:      buffer size
3988  *
3989  * This function will update the firmware on the adapter.
3990  *
3991  * Return value:
3992  *      count on success / other on failure
3993  **/
3994 static ssize_t ipr_store_update_fw(struct device *dev,
3995                                    struct device_attribute *attr,
3996                                    const char *buf, size_t count)
3997 {
3998         struct Scsi_Host *shost = class_to_shost(dev);
3999         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4000         struct ipr_ucode_image_header *image_hdr;
4001         const struct firmware *fw_entry;
4002         struct ipr_sglist *sglist;
4003         char fname[100];
4004         u8 *src;
4005         char *endline;
4006         int result, dnld_size;
4007
4008         if (!capable(CAP_SYS_ADMIN))
4009                 return -EACCES;
4010
4011         snprintf(fname, sizeof(fname), "%s", buf);
4012
4013         endline = strchr(fname, '\n');
4014         if (endline)
4015                 *endline = '\0';
4016
4017         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4018                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4019                 return -EIO;
4020         }
4021
4022         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4023
4024         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4025         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4026         sglist = ipr_alloc_ucode_buffer(dnld_size);
4027
4028         if (!sglist) {
4029                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4030                 release_firmware(fw_entry);
4031                 return -ENOMEM;
4032         }
4033
4034         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4035
4036         if (result) {
4037                 dev_err(&ioa_cfg->pdev->dev,
4038                         "Microcode buffer copy to DMA buffer failed\n");
4039                 goto out;
4040         }
4041
4042         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4043
4044         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4045
4046         if (!result)
4047                 result = count;
4048 out:
4049         ipr_free_ucode_buffer(sglist);
4050         release_firmware(fw_entry);
4051         return result;
4052 }
4053
4054 static struct device_attribute ipr_update_fw_attr = {
4055         .attr = {
4056                 .name =         "update_fw",
4057                 .mode =         S_IWUSR,
4058         },
4059         .store = ipr_store_update_fw
4060 };
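
/*
 * Illustrative use from userspace (a sketch; the host number and the
 * image name are hypothetical, and the image must live where
 * request_firmware() can find it, e.g. under /lib/firmware):
 *
 *      int fd = open("/sys/class/scsi_host/host0/update_fw", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, "ibm-ipr.ucode\n", 14);
 *              close(fd);
 *      }
 */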
4061
4062 /**
4063  * ipr_show_fw_type - Show the adapter's firmware type.
4064  * @dev:        class device struct
4065  * @buf:        buffer
4066  *
4067  * Return value:
4068  *      number of bytes printed to buffer
4069  **/
4070 static ssize_t ipr_show_fw_type(struct device *dev,
4071                                 struct device_attribute *attr, char *buf)
4072 {
4073         struct Scsi_Host *shost = class_to_shost(dev);
4074         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4075         unsigned long lock_flags = 0;
4076         int len;
4077
4078         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4079         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4080         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4081         return len;
4082 }
4083
4084 static struct device_attribute ipr_ioa_fw_type_attr = {
4085         .attr = {
4086                 .name =         "fw_type",
4087                 .mode =         S_IRUGO,
4088         },
4089         .show = ipr_show_fw_type
4090 };
4091
4092 static struct device_attribute *ipr_ioa_attrs[] = {
4093         &ipr_fw_version_attr,
4094         &ipr_log_level_attr,
4095         &ipr_diagnostics_attr,
4096         &ipr_ioa_state_attr,
4097         &ipr_ioa_reset_attr,
4098         &ipr_update_fw_attr,
4099         &ipr_ioa_fw_type_attr,
4100         &ipr_iopoll_weight_attr,
4101         NULL,
4102 };
4103
4104 #ifdef CONFIG_SCSI_IPR_DUMP
4105 /**
4106  * ipr_read_dump - Dump the adapter
4107  * @filp:               open sysfs file
4108  * @kobj:               kobject struct
4109  * @bin_attr:           bin_attribute struct
4110  * @buf:                buffer
4111  * @off:                offset
4112  * @count:              buffer size
4113  *
4114  * Return value:
4115  *      number of bytes read / other on failure
4116  **/
4117 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4118                              struct bin_attribute *bin_attr,
4119                              char *buf, loff_t off, size_t count)
4120 {
4121         struct device *cdev = container_of(kobj, struct device, kobj);
4122         struct Scsi_Host *shost = class_to_shost(cdev);
4123         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4124         struct ipr_dump *dump;
4125         unsigned long lock_flags = 0;
4126         char *src;
4127         int len, sdt_end;
4128         size_t rc = count;
4129
4130         if (!capable(CAP_SYS_ADMIN))
4131                 return -EACCES;
4132
4133         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4134         dump = ioa_cfg->dump;
4135
4136         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4137                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4138                 return 0;
4139         }
4140         kref_get(&dump->kref);
4141         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4142
4143         if (off > dump->driver_dump.hdr.len) {
4144                 kref_put(&dump->kref, ipr_release_dump);
4145                 return 0;
4146         }
4147
4148         if (off + count > dump->driver_dump.hdr.len) {
4149                 count = dump->driver_dump.hdr.len - off;
4150                 rc = count;
4151         }
4152
4153         if (count && off < sizeof(dump->driver_dump)) {
4154                 if (off + count > sizeof(dump->driver_dump))
4155                         len = sizeof(dump->driver_dump) - off;
4156                 else
4157                         len = count;
4158                 src = (u8 *)&dump->driver_dump + off;
4159                 memcpy(buf, src, len);
4160                 buf += len;
4161                 off += len;
4162                 count -= len;
4163         }
4164
4165         off -= sizeof(dump->driver_dump);
4166
4167         if (ioa_cfg->sis64)
4168                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4169                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4170                            sizeof(struct ipr_sdt_entry));
4171         else
4172                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4173                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4174
4175         if (count && off < sdt_end) {
4176                 if (off + count > sdt_end)
4177                         len = sdt_end - off;
4178                 else
4179                         len = count;
4180                 src = (u8 *)&dump->ioa_dump + off;
4181                 memcpy(buf, src, len);
4182                 buf += len;
4183                 off += len;
4184                 count -= len;
4185         }
4186
4187         off -= sdt_end;
4188
4189         while (count) {
4190                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4191                         len = PAGE_ALIGN(off) - off;
4192                 else
4193                         len = count;
4194                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4195                 src += off & ~PAGE_MASK;
4196                 memcpy(buf, src, len);
4197                 buf += len;
4198                 off += len;
4199                 count -= len;
4200         }
4201
4202         kref_put(&dump->kref, ipr_release_dump);
4203         return rc;
4204 }
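
/*
 * Note on the layout walked by ipr_read_dump() above: the dump image
 * is three consecutive regions -- the driver_dump header structure,
 * the SDT region (whose length, sdt_end, depends on the number of
 * entries used on SIS64 versus the fixed IPR_FMT2_NUM_SDT_ENTRIES),
 * and finally the IOA data, copied out page by page from the
 * ioa_data page array.
 */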
4205
4206 /**
4207  * ipr_alloc_dump - Prepare for adapter dump
4208  * @ioa_cfg:    ioa config struct
4209  *
4210  * Return value:
4211  *      0 on success / other on failure
4212  **/
4213 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4214 {
4215         struct ipr_dump *dump;
4216         __be32 **ioa_data;
4217         unsigned long lock_flags = 0;
4218
4219         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4220
4221         if (!dump) {
4222                 ipr_err("Dump memory allocation failed\n");
4223                 return -ENOMEM;
4224         }
4225
4226         if (ioa_cfg->sis64)
4227                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4228         else
4229                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4230
4231         if (!ioa_data) {
4232                 ipr_err("Dump memory allocation failed\n");
4233                 kfree(dump);
4234                 return -ENOMEM;
4235         }
4236
4237         dump->ioa_dump.ioa_data = ioa_data;
4238
4239         kref_init(&dump->kref);
4240         dump->ioa_cfg = ioa_cfg;
4241
4242         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4243
4244         if (INACTIVE != ioa_cfg->sdt_state) {
4245                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4246                 vfree(dump->ioa_dump.ioa_data);
4247                 kfree(dump);
4248                 return 0;
4249         }
4250
4251         ioa_cfg->dump = dump;
4252         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4253         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4254                 ioa_cfg->dump_taken = 1;
4255                 schedule_work(&ioa_cfg->work_q);
4256         }
4257         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4258
4259         return 0;
4260 }
4261
4262 /**
4263  * ipr_free_dump - Free adapter dump memory
4264  * @ioa_cfg:    ioa config struct
4265  *
4266  * Return value:
4267  *      0 on success / other on failure
4268  **/
4269 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4270 {
4271         struct ipr_dump *dump;
4272         unsigned long lock_flags = 0;
4273
4274         ENTER;
4275
4276         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4277         dump = ioa_cfg->dump;
4278         if (!dump) {
4279                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4280                 return 0;
4281         }
4282
4283         ioa_cfg->dump = NULL;
4284         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4285
4286         kref_put(&dump->kref, ipr_release_dump);
4287
4288         LEAVE;
4289         return 0;
4290 }
4291
4292 /**
4293  * ipr_write_dump - Setup dump state of adapter
4294  * @filp:               open sysfs file
4295  * @kobj:               kobject struct
4296  * @bin_attr:           bin_attribute struct
4297  * @buf:                buffer
4298  * @off:                offset
4299  * @count:              buffer size
4300  *
4301  * Return value:
4302  *      count on success / other on failure
4303  **/
4304 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4305                               struct bin_attribute *bin_attr,
4306                               char *buf, loff_t off, size_t count)
4307 {
4308         struct device *cdev = container_of(kobj, struct device, kobj);
4309         struct Scsi_Host *shost = class_to_shost(cdev);
4310         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4311         int rc;
4312
4313         if (!capable(CAP_SYS_ADMIN))
4314                 return -EACCES;
4315
4316         if (buf[0] == '1')
4317                 rc = ipr_alloc_dump(ioa_cfg);
4318         else if (buf[0] == '0')
4319                 rc = ipr_free_dump(ioa_cfg);
4320         else
4321                 return -EINVAL;
4322
4323         if (rc)
4324                 return rc;
4325         else
4326                 return count;
4327 }
4328
4329 static struct bin_attribute ipr_dump_attr = {
4330         .attr = {
4331                 .name = "dump",
4332                 .mode = S_IRUSR | S_IWUSR,
4333         },
4334         .size = 0,
4335         .read = ipr_read_dump,
4336         .write = ipr_write_dump
4337 };
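
/*
 * Usage sketch (illustrative; "host0" is a placeholder): writing '1'
 * arms dump collection via ipr_alloc_dump(), reading returns the dump
 * image, and writing '0' releases it via ipr_free_dump():
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   cat /sys/class/scsi_host/host0/dump > /tmp/ipr_dump
 *   echo 0 > /sys/class/scsi_host/host0/dump
 */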
4338 #else
4339 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4340 #endif
4341
4342 /**
4343  * ipr_change_queue_depth - Change the device's queue depth
4344  * @sdev:       scsi device struct
4345  * @qdepth:     depth to set
4347  *
4348  * Return value:
4349  *      actual depth set
4350  **/
4351 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4352 {
4353         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4354         struct ipr_resource_entry *res;
4355         unsigned long lock_flags = 0;
4356
4357         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4358         res = (struct ipr_resource_entry *)sdev->hostdata;
4359
4360         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4361                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4362         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4363
4364         scsi_change_queue_depth(sdev, qdepth);
4365         return sdev->queue_depth;
4366 }
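
/*
 * Example (illustrative; "sdb" is a placeholder): the midlayer invokes
 * ipr_change_queue_depth() when the generic queue_depth attribute is
 * written, with GATA devices capped at IPR_MAX_CMD_PER_ATA_LUN:
 *
 *   echo 16 > /sys/block/sdb/device/queue_depth
 */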
4367
4368 /**
4369  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4370  * @dev:        device struct
4371  * @attr:       device attribute structure
4372  * @buf:        buffer
4373  *
4374  * Return value:
4375  *      number of bytes printed to buffer
4376  **/
4377 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4378 {
4379         struct scsi_device *sdev = to_scsi_device(dev);
4380         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4381         struct ipr_resource_entry *res;
4382         unsigned long lock_flags = 0;
4383         ssize_t len = -ENXIO;
4384
4385         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386         res = (struct ipr_resource_entry *)sdev->hostdata;
4387         if (res)
4388                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4389         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4390         return len;
4391 }
4392
4393 static struct device_attribute ipr_adapter_handle_attr = {
4394         .attr = {
4395                 .name =         "adapter_handle",
4396                 .mode =         S_IRUSR,
4397         },
4398         .show = ipr_show_adapter_handle
4399 };
4400
4401 /**
4402  * ipr_show_resource_path - Show the resource path or the resource address for
4403  *                          this device.
4404  * @dev:        device struct
4405  * @attr:       device attribute structure
4406  * @buf:        buffer
4407  *
4408  * Return value:
4409  *      number of bytes printed to buffer
4410  **/
4411 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4412 {
4413         struct scsi_device *sdev = to_scsi_device(dev);
4414         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4415         struct ipr_resource_entry *res;
4416         unsigned long lock_flags = 0;
4417         ssize_t len = -ENXIO;
4418         char buffer[IPR_MAX_RES_PATH_LENGTH];
4419
4420         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4421         res = (struct ipr_resource_entry *)sdev->hostdata;
4422         if (res && ioa_cfg->sis64)
4423                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4424                                __ipr_format_res_path(res->res_path, buffer,
4425                                                      sizeof(buffer)));
4426         else if (res)
4427                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4428                                res->bus, res->target, res->lun);
4429
4430         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4431         return len;
4432 }
4433
4434 static struct device_attribute ipr_resource_path_attr = {
4435         .attr = {
4436                 .name =         "resource_path",
4437                 .mode =         S_IRUGO,
4438         },
4439         .show = ipr_show_resource_path
4440 };
4441
4442 /**
4443  * ipr_show_device_id - Show the device_id for this device.
4444  * @dev:        device struct
4445  * @attr:       device attribute structure
4446  * @buf:        buffer
4447  *
4448  * Return value:
4449  *      number of bytes printed to buffer
4450  **/
4451 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4452 {
4453         struct scsi_device *sdev = to_scsi_device(dev);
4454         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4455         struct ipr_resource_entry *res;
4456         unsigned long lock_flags = 0;
4457         ssize_t len = -ENXIO;
4458
4459         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4460         res = (struct ipr_resource_entry *)sdev->hostdata;
4461         if (res && ioa_cfg->sis64)
4462                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4463         else if (res)
4464                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4465
4466         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4467         return len;
4468 }
4469
4470 static struct device_attribute ipr_device_id_attr = {
4471         .attr = {
4472                 .name =         "device_id",
4473                 .mode =         S_IRUGO,
4474         },
4475         .show = ipr_show_device_id
4476 };
4477
4478 /**
4479  * ipr_show_resource_type - Show the resource type for this device.
4480  * @dev:        device struct
4481  * @attr:       device attribute structure
4482  * @buf:        buffer
4483  *
4484  * Return value:
4485  *      number of bytes printed to buffer
4486  **/
4487 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4488 {
4489         struct scsi_device *sdev = to_scsi_device(dev);
4490         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4491         struct ipr_resource_entry *res;
4492         unsigned long lock_flags = 0;
4493         ssize_t len = -ENXIO;
4494
4495         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4496         res = (struct ipr_resource_entry *)sdev->hostdata;
4497
4498         if (res)
4499                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4500
4501         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4502         return len;
4503 }
4504
4505 static struct device_attribute ipr_resource_type_attr = {
4506         .attr = {
4507                 .name =         "resource_type",
4508                 .mode =         S_IRUGO,
4509         },
4510         .show = ipr_show_resource_type
4511 };
4512
4513 /**
4514  * ipr_show_raw_mode - Show the device's raw mode
4515  * @dev:        class device struct
4516  * @buf:        buffer
4517  *
4518  * Return value:
4519  *      number of bytes printed to buffer
4520  **/
4521 static ssize_t ipr_show_raw_mode(struct device *dev,
4522                                  struct device_attribute *attr, char *buf)
4523 {
4524         struct scsi_device *sdev = to_scsi_device(dev);
4525         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4526         struct ipr_resource_entry *res;
4527         unsigned long lock_flags = 0;
4528         ssize_t len;
4529
4530         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4531         res = (struct ipr_resource_entry *)sdev->hostdata;
4532         if (res)
4533                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4534         else
4535                 len = -ENXIO;
4536         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4537         return len;
4538 }
4539
4540 /**
4541  * ipr_store_raw_mode - Change the device's raw mode
4542  * @dev:        class device struct
4543  * @buf:        buffer
4544  *
4545  * Return value:
4546  *      number of bytes consumed / other on failure
4547  **/
4548 static ssize_t ipr_store_raw_mode(struct device *dev,
4549                                   struct device_attribute *attr,
4550                                   const char *buf, size_t count)
4551 {
4552         struct scsi_device *sdev = to_scsi_device(dev);
4553         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4554         struct ipr_resource_entry *res;
4555         unsigned long lock_flags = 0;
4556         ssize_t len;
4557
4558         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4559         res = (struct ipr_resource_entry *)sdev->hostdata;
4560         if (res) {
4561                 if (ipr_is_af_dasd_device(res)) {
4562                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4563                         len = strlen(buf);
4564                         if (res->sdev)
4565                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4566                                         res->raw_mode ? "enabled" : "disabled");
4567                 } else
4568                         len = -EINVAL;
4569         } else
4570                 len = -ENXIO;
4571         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4572         return len;
4573 }
4574
4575 static struct device_attribute ipr_raw_mode_attr = {
4576         .attr = {
4577                 .name =         "raw_mode",
4578                 .mode =         S_IRUGO | S_IWUSR,
4579         },
4580         .show = ipr_show_raw_mode,
4581         .store = ipr_store_raw_mode
4582 };
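
/*
 * Usage sketch (illustrative device path): raw_mode may only be
 * toggled on AF DASD devices; any other resource type fails with
 * -EINVAL:
 *
 *   echo 1 > /sys/bus/scsi/devices/2:0:3:0/raw_mode
 */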
4583
4584 static struct device_attribute *ipr_dev_attrs[] = {
4585         &ipr_adapter_handle_attr,
4586         &ipr_resource_path_attr,
4587         &ipr_device_id_attr,
4588         &ipr_resource_type_attr,
4589         &ipr_raw_mode_attr,
4590         NULL,
4591 };
4592
4593 /**
4594  * ipr_biosparam - Return the HSC mapping
4595  * @sdev:                       scsi device struct
4596  * @block_device:       block device pointer
4597  * @capacity:           capacity of the device
4598  * @parm:                       Array containing returned HSC values.
4599  *
4600  * This function generates the HSC parms that fdisk uses.
4601  * We want to make sure we return something that places partitions
4602  * on 4k boundaries for best performance with the IOA.
4603  *
4604  * Return value:
4605  *      0 on success
4606  **/
4607 static int ipr_biosparam(struct scsi_device *sdev,
4608                          struct block_device *block_device,
4609                          sector_t capacity, int *parm)
4610 {
4611         int heads, sectors;
4612         sector_t cylinders;
4613
4614         heads = 128;
4615         sectors = 32;
4616
4617         cylinders = capacity;
4618         sector_div(cylinders, (128 * 32));
4619
4620         /* return result */
4621         parm[0] = heads;
4622         parm[1] = sectors;
4623         parm[2] = cylinders;
4624
4625         return 0;
4626 }
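
/*
 * Worked example (hypothetical capacity): with the fixed 128-head,
 * 32-sector geometry above, a cylinder spans 128 * 32 = 4096 sectors
 * (2 MiB at 512 bytes per sector), so cylinder-aligned partitions land
 * on 4k boundaries. A 268435456-sector (128 GiB) disk therefore
 * reports 268435456 / 4096 = 65536 cylinders.
 */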
4627
4628 /**
4629  * ipr_find_starget - Find target based on bus/target.
4630  * @starget:    scsi target struct
4631  *
4632  * Return value:
4633  *      resource entry pointer if found / NULL if not found
4634  **/
4635 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4636 {
4637         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4638         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4639         struct ipr_resource_entry *res;
4640
4641         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4642                 if ((res->bus == starget->channel) &&
4643                     (res->target == starget->id)) {
4644                         return res;
4645                 }
4646         }
4647
4648         return NULL;
4649 }
4650
4651 static struct ata_port_info sata_port_info;
4652
4653 /**
4654  * ipr_target_alloc - Prepare for commands to a SCSI target
4655  * @starget:    scsi target struct
4656  *
4657  * If the device is a SATA device, this function allocates an
4658  * ATA port with libata, else it does nothing.
4659  *
4660  * Return value:
4661  *      0 on success / non-0 on failure
4662  **/
4663 static int ipr_target_alloc(struct scsi_target *starget)
4664 {
4665         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4666         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4667         struct ipr_sata_port *sata_port;
4668         struct ata_port *ap;
4669         struct ipr_resource_entry *res;
4670         unsigned long lock_flags;
4671
4672         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4673         res = ipr_find_starget(starget);
4674         starget->hostdata = NULL;
4675
4676         if (res && ipr_is_gata(res)) {
4677                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4678                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4679                 if (!sata_port)
4680                         return -ENOMEM;
4681
4682                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4683                 if (ap) {
4684                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4685                         sata_port->ioa_cfg = ioa_cfg;
4686                         sata_port->ap = ap;
4687                         sata_port->res = res;
4688
4689                         res->sata_port = sata_port;
4690                         ap->private_data = sata_port;
4691                         starget->hostdata = sata_port;
4692                 } else {
4693                         kfree(sata_port);
4694                         return -ENOMEM;
4695                 }
4696         }
4697         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4698
4699         return 0;
4700 }
4701
4702 /**
4703  * ipr_target_destroy - Destroy a SCSI target
4704  * @starget:    scsi target struct
4705  *
4706  * If the device was a SATA device, this function frees the libata
4707  * ATA port, else it does nothing.
4708  *
4709  **/
4710 static void ipr_target_destroy(struct scsi_target *starget)
4711 {
4712         struct ipr_sata_port *sata_port = starget->hostdata;
4713         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4714         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4715
4716         if (ioa_cfg->sis64) {
4717                 if (!ipr_find_starget(starget)) {
4718                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4719                                 clear_bit(starget->id, ioa_cfg->array_ids);
4720                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4721                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4722                         else if (starget->channel == 0)
4723                                 clear_bit(starget->id, ioa_cfg->target_ids);
4724                 }
4725         }
4726
4727         if (sata_port) {
4728                 starget->hostdata = NULL;
4729                 ata_sas_port_destroy(sata_port->ap);
4730                 kfree(sata_port);
4731         }
4732 }
4733
4734 /**
4735  * ipr_find_sdev - Find device based on bus/target/lun.
4736  * @sdev:       scsi device struct
4737  *
4738  * Return value:
4739  *      resource entry pointer if found / NULL if not found
4740  **/
4741 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4742 {
4743         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4744         struct ipr_resource_entry *res;
4745
4746         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4747                 if ((res->bus == sdev->channel) &&
4748                     (res->target == sdev->id) &&
4749                     (res->lun == sdev->lun))
4750                         return res;
4751         }
4752
4753         return NULL;
4754 }
4755
4756 /**
4757  * ipr_slave_destroy - Unconfigure a SCSI device
4758  * @sdev:       scsi device struct
4759  *
4760  * Return value:
4761  *      nothing
4762  **/
4763 static void ipr_slave_destroy(struct scsi_device *sdev)
4764 {
4765         struct ipr_resource_entry *res;
4766         struct ipr_ioa_cfg *ioa_cfg;
4767         unsigned long lock_flags = 0;
4768
4769         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4770
4771         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4772         res = (struct ipr_resource_entry *) sdev->hostdata;
4773         if (res) {
4774                 if (res->sata_port)
4775                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4776                 sdev->hostdata = NULL;
4777                 res->sdev = NULL;
4778                 res->sata_port = NULL;
4779         }
4780         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4781 }
4782
4783 /**
4784  * ipr_slave_configure - Configure a SCSI device
4785  * @sdev:       scsi device struct
4786  *
4787  * This function configures the specified scsi device.
4788  *
4789  * Return value:
4790  *      0 on success
4791  **/
4792 static int ipr_slave_configure(struct scsi_device *sdev)
4793 {
4794         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4795         struct ipr_resource_entry *res;
4796         struct ata_port *ap = NULL;
4797         unsigned long lock_flags = 0;
4798         char buffer[IPR_MAX_RES_PATH_LENGTH];
4799
4800         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4801         res = sdev->hostdata;
4802         if (res) {
4803                 if (ipr_is_af_dasd_device(res))
4804                         sdev->type = TYPE_RAID;
4805                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4806                         sdev->scsi_level = 4;
4807                         sdev->no_uld_attach = 1;
4808                 }
4809                 if (ipr_is_vset_device(res)) {
4810                         sdev->scsi_level = SCSI_SPC_3;
4811                         blk_queue_rq_timeout(sdev->request_queue,
4812                                              IPR_VSET_RW_TIMEOUT);
4813                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4814                 }
4815                 if (ipr_is_gata(res) && res->sata_port)
4816                         ap = res->sata_port->ap;
4817                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4818
4819                 if (ap) {
4820                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4821                         ata_sas_slave_configure(sdev, ap);
4822                 }
4823
4824                 if (ioa_cfg->sis64)
4825                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4826                                     ipr_format_res_path(ioa_cfg,
4827                                 res->res_path, buffer, sizeof(buffer)));
4828                 return 0;
4829         }
4830         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4831         return 0;
4832 }
4833
4834 /**
4835  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4836  * @sdev:       scsi device struct
4837  *
4838  * This function initializes an ATA port so that future commands
4839  * sent through queuecommand will work.
4840  *
4841  * Return value:
4842  *      0 on success
4843  **/
4844 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4845 {
4846         struct ipr_sata_port *sata_port = NULL;
4847         int rc = -ENXIO;
4848
4849         ENTER;
4850         if (sdev->sdev_target)
4851                 sata_port = sdev->sdev_target->hostdata;
4852         if (sata_port) {
4853                 rc = ata_sas_port_init(sata_port->ap);
4854                 if (rc == 0)
4855                         rc = ata_sas_sync_probe(sata_port->ap);
4856         }
4857
4858         if (rc)
4859                 ipr_slave_destroy(sdev);
4860
4861         LEAVE;
4862         return rc;
4863 }
4864
4865 /**
4866  * ipr_slave_alloc - Prepare for commands to a device.
4867  * @sdev:       scsi device struct
4868  *
4869  * This function saves a pointer to the resource entry
4870  * in the scsi device struct if the device exists. We
4871  * can then use this pointer in ipr_queuecommand when
4872  * handling new commands.
4873  *
4874  * Return value:
4875  *      0 on success / -ENXIO if device does not exist
4876  **/
4877 static int ipr_slave_alloc(struct scsi_device *sdev)
4878 {
4879         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4880         struct ipr_resource_entry *res;
4881         unsigned long lock_flags;
4882         int rc = -ENXIO;
4883
4884         sdev->hostdata = NULL;
4885
4886         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4887
4888         res = ipr_find_sdev(sdev);
4889         if (res) {
4890                 res->sdev = sdev;
4891                 res->add_to_ml = 0;
4892                 res->in_erp = 0;
4893                 sdev->hostdata = res;
4894                 if (!ipr_is_naca_model(res))
4895                         res->needs_sync_complete = 1;
4896                 rc = 0;
4897                 if (ipr_is_gata(res)) {
4898                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4899                         return ipr_ata_slave_alloc(sdev);
4900                 }
4901         }
4902
4903         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4904
4905         return rc;
4906 }
4907
4908 /**
4909  * ipr_match_lun - Match function for specified LUN
4910  * @ipr_cmd:    ipr command struct
4911  * @device:             device to match (sdev)
4912  *
4913  * Returns:
4914  *      1 if command matches sdev / 0 if command does not match sdev
4915  **/
4916 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4917 {
4918         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4919                 return 1;
4920         return 0;
4921 }
4922
4923 /**
4924  * ipr_wait_for_ops - Wait for matching commands to complete
4925  * @ioa_cfg:    ioa config struct
4926  * @device:             device to match (sdev)
4927  * @match:              match function to use
4928  *
4929  * Returns:
4930  *      SUCCESS / FAILED
4931  **/
4932 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4933                             int (*match)(struct ipr_cmnd *, void *))
4934 {
4935         struct ipr_cmnd *ipr_cmd;
4936         int wait;
4937         unsigned long flags;
4938         struct ipr_hrr_queue *hrrq;
4939         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4940         DECLARE_COMPLETION_ONSTACK(comp);
4941
4942         ENTER;
4943         do {
4944                 wait = 0;
4945
4946                 for_each_hrrq(hrrq, ioa_cfg) {
4947                         spin_lock_irqsave(hrrq->lock, flags);
4948                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4949                                 if (match(ipr_cmd, device)) {
4950                                         ipr_cmd->eh_comp = &comp;
4951                                         wait++;
4952                                 }
4953                         }
4954                         spin_unlock_irqrestore(hrrq->lock, flags);
4955                 }
4956
4957                 if (wait) {
4958                         timeout = wait_for_completion_timeout(&comp, timeout);
4959
4960                         if (!timeout) {
4961                                 wait = 0;
4962
4963                                 for_each_hrrq(hrrq, ioa_cfg) {
4964                                         spin_lock_irqsave(hrrq->lock, flags);
4965                                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4966                                                 if (match(ipr_cmd, device)) {
4967                                                         ipr_cmd->eh_comp = NULL;
4968                                                         wait++;
4969                                                 }
4970                                         }
4971                                         spin_unlock_irqrestore(hrrq->lock, flags);
4972                                 }
4973
4974                                 if (wait)
4975                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4976                                 LEAVE;
4977                                 return wait ? FAILED : SUCCESS;
4978                         }
4979                 }
4980         } while (wait);
4981
4982         LEAVE;
4983         return SUCCESS;
4984 }
4985
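/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Initiates an adapter reset unless one is already in progress, waits
 * for any reset/reload to finish, and reports failure if the adapter
 * is dead afterwards.
 *
 * Return value:
 *      SUCCESS / FAILED
 **/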
4986 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4987 {
4988         struct ipr_ioa_cfg *ioa_cfg;
4989         unsigned long lock_flags = 0;
4990         int rc = SUCCESS;
4991
4992         ENTER;
4993         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4994         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4995
4996         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4997                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4998                 dev_err(&ioa_cfg->pdev->dev,
4999                         "Adapter being reset as a result of error recovery.\n");
5000
5001                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5002                         ioa_cfg->sdt_state = GET_DUMP;
5003         }
5004
5005         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5006         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5007         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5008
5009         /* If we got hit with a host reset while we were already resetting
5010          * the adapter for some reason, and the reset failed, report failure. */
5011         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5012                 ipr_trace;
5013                 rc = FAILED;
5014         }
5015
5016         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5017         LEAVE;
5018         return rc;
5019 }
5020
5021 /**
5022  * ipr_device_reset - Reset the device
5023  * @ioa_cfg:    ioa config struct
5024  * @res:                resource entry struct
5025  *
5026  * This function issues a device reset to the affected device.
5027  * If the device is a SCSI device, a LUN reset will be sent
5028  * to the device first. If that does not work, a target reset
5029  * will be sent. If the device is a SATA device, a PHY reset will
5030  * be sent.
5031  *
5032  * Return value:
5033  *      0 on success / non-zero on failure
5034  **/
5035 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5036                             struct ipr_resource_entry *res)
5037 {
5038         struct ipr_cmnd *ipr_cmd;
5039         struct ipr_ioarcb *ioarcb;
5040         struct ipr_cmd_pkt *cmd_pkt;
5041         struct ipr_ioarcb_ata_regs *regs;
5042         u32 ioasc;
5043
5044         ENTER;
5045         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5046         ioarcb = &ipr_cmd->ioarcb;
5047         cmd_pkt = &ioarcb->cmd_pkt;
5048
5049         if (ipr_cmd->ioa_cfg->sis64) {
5050                 regs = &ipr_cmd->i.ata_ioadl.regs;
5051                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5052         } else
5053                 regs = &ioarcb->u.add_data.u.regs;
5054
5055         ioarcb->res_handle = res->res_handle;
5056         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5057         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5058         if (ipr_is_gata(res)) {
5059                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5060                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5061                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5062         }
5063
5064         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5065         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5066         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5067         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5068                 if (ipr_cmd->ioa_cfg->sis64)
5069                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5070                                sizeof(struct ipr_ioasa_gata));
5071                 else
5072                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5073                                sizeof(struct ipr_ioasa_gata));
5074         }
5075
5076         LEAVE;
5077         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5078 }
5079
5080 /**
5081  * ipr_sata_reset - Reset the SATA port
5082  * @link:       SATA link to reset
5083  * @classes:    class of the attached device
5084  * @deadline:   reset deadline from libata EH (unused)
5085  * This function issues a SATA phy reset to the affected ATA link.
5086  *
5087  * Return value:
5088  *      0 on success / non-zero on failure
5089  **/
5090 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5091                                 unsigned long deadline)
5092 {
5093         struct ipr_sata_port *sata_port = link->ap->private_data;
5094         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5095         struct ipr_resource_entry *res;
5096         unsigned long lock_flags = 0;
5097         int rc = -ENXIO;
5098
5099         ENTER;
5100         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5101         while (ioa_cfg->in_reset_reload) {
5102                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5103                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5104                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5105         }
5106
5107         res = sata_port->res;
5108         if (res) {
5109                 rc = ipr_device_reset(ioa_cfg, res);
5110                 *classes = res->ata_class;
5111         }
5112
5113         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5114         LEAVE;
5115         return rc;
5116 }
5117
5118 /**
5119  * __ipr_eh_dev_reset - Reset the device
5120  * @scsi_cmd:   scsi command struct
5121  *
5122  * This function issues a device reset to the affected device.
5123  * A LUN reset will be sent to the device first. If that does
5124  * not work, a target reset will be sent.
5125  *
5126  * Return value:
5127  *      SUCCESS / FAILED
5128  **/
5129 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5130 {
5131         struct ipr_cmnd *ipr_cmd;
5132         struct ipr_ioa_cfg *ioa_cfg;
5133         struct ipr_resource_entry *res;
5134         struct ata_port *ap;
5135         int rc = 0;
5136         struct ipr_hrr_queue *hrrq;
5137
5138         ENTER;
5139         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5140         res = scsi_cmd->device->hostdata;
5141
5142         if (!res)
5143                 return FAILED;
5144
5145         /*
5146          * If we are currently going through reset/reload, return failed. This will force the
5147          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5148          * reset to complete
5149          */
5150         if (ioa_cfg->in_reset_reload)
5151                 return FAILED;
5152         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5153                 return FAILED;
5154
5155         for_each_hrrq(hrrq, ioa_cfg) {
5156                 spin_lock(&hrrq->_lock);
5157                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5158                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5159                                 if (ipr_cmd->scsi_cmd)
5160                                         ipr_cmd->done = ipr_scsi_eh_done;
5161                                 if (ipr_cmd->qc)
5162                                         ipr_cmd->done = ipr_sata_eh_done;
5163                                 if (ipr_cmd->qc &&
5164                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5165                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5166                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5167                                 }
5168                         }
5169                 }
5170                 spin_unlock(&hrrq->_lock);
5171         }
5172         res->resetting_device = 1;
5173         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5174
5175         if (ipr_is_gata(res) && res->sata_port) {
5176                 ap = res->sata_port->ap;
5177                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5178                 ata_std_error_handler(ap);
5179                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5180
5181                 for_each_hrrq(hrrq, ioa_cfg) {
5182                         spin_lock(&hrrq->_lock);
5183                         list_for_each_entry(ipr_cmd,
5184                                             &hrrq->hrrq_pending_q, queue) {
5185                                 if (ipr_cmd->ioarcb.res_handle ==
5186                                     res->res_handle) {
5187                                         rc = -EIO;
5188                                         break;
5189                                 }
5190                         }
5191                         spin_unlock(&hrrq->_lock);
5192                 }
5193         } else
5194                 rc = ipr_device_reset(ioa_cfg, res);
5195         res->resetting_device = 0;
5196         res->reset_occurred = 1;
5197
5198         LEAVE;
5199         return rc ? FAILED : SUCCESS;
5200 }
5201
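/**
 * ipr_eh_dev_reset - Device reset entry point for the SCSI EH
 * @cmd:        scsi command struct
 *
 * Performs the reset via __ipr_eh_dev_reset() under the host lock,
 * then waits for all outstanding ops against the device to complete.
 *
 * Return value:
 *      SUCCESS / FAILED
 **/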
5202 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5203 {
5204         int rc;
5205         struct ipr_ioa_cfg *ioa_cfg;
5206
5207         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5208
5209         spin_lock_irq(cmd->device->host->host_lock);
5210         rc = __ipr_eh_dev_reset(cmd);
5211         spin_unlock_irq(cmd->device->host->host_lock);
5212
5213         if (rc == SUCCESS)
5214                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5215
5216         return rc;
5217 }
5218
5219 /**
5220  * ipr_bus_reset_done - Op done function for bus reset.
5221  * @ipr_cmd:    ipr command struct
5222  *
5223  * This function is the op done function for a bus reset
5224  *
5225  * Return value:
5226  *      none
5227  **/
5228 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5229 {
5230         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5231         struct ipr_resource_entry *res;
5232
5233         ENTER;
5234         if (!ioa_cfg->sis64)
5235                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5236                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5237                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5238                                 break;
5239                         }
5240                 }
5241
5242         /*
5243          * If abort has not completed, indicate the reset has, else call the
5244          * abort's done function to wake the sleeping eh thread
5245          */
5246         if (ipr_cmd->sibling->sibling)
5247                 ipr_cmd->sibling->sibling = NULL;
5248         else
5249                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5250
5251         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5252         LEAVE;
5253 }
5254
5255 /**
5256  * ipr_abort_timeout - An abort task has timed out
5257  * @ipr_cmd:    ipr command struct
5258  *
5259  * This function handles when an abort task times out. If this
5260  * happens we issue a bus reset since we have resources tied
5261  * up that must be freed before returning to the midlayer.
5262  *
5263  * Return value:
5264  *      none
5265  **/
5266 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5267 {
5268         struct ipr_cmnd *reset_cmd;
5269         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5270         struct ipr_cmd_pkt *cmd_pkt;
5271         unsigned long lock_flags = 0;
5272
5273         ENTER;
5274         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5275         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5276                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5277                 return;
5278         }
5279
5280         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5281         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5282         ipr_cmd->sibling = reset_cmd;
5283         reset_cmd->sibling = ipr_cmd;
5284         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5285         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5286         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5287         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5288         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5289
5290         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5291         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5292         LEAVE;
5293 }
5294
5295 /**
5296  * ipr_cancel_op - Cancel specified op
5297  * @scsi_cmd:   scsi command struct
5298  *
5299  * This function cancels specified op.
5300  *
5301  * Return value:
5302  *      SUCCESS / FAILED
5303  **/
5304 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5305 {
5306         struct ipr_cmnd *ipr_cmd;
5307         struct ipr_ioa_cfg *ioa_cfg;
5308         struct ipr_resource_entry *res;
5309         struct ipr_cmd_pkt *cmd_pkt;
5310         u32 ioasc, int_reg;
5311         int op_found = 0;
5312         struct ipr_hrr_queue *hrrq;
5313
5314         ENTER;
5315         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5316         res = scsi_cmd->device->hostdata;
5317
5318         /* If we are currently going through reset/reload, return failed.
5319          * This will force the mid-layer to call ipr_eh_host_reset,
5320          * which will then go to sleep and wait for the reset to complete
5321          */
5322         if (ioa_cfg->in_reset_reload ||
5323             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5324                 return FAILED;
5325         if (!res)
5326                 return FAILED;
5327
5328         /*
5329          * If we are aborting a timed out op, chances are that the timeout was caused
5330          * by an EEH error that has not yet been detected. In such cases,
5331          * reading a register will trigger the EEH recovery infrastructure.
5332          */
5333         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5334
5335         if (!ipr_is_gscsi(res))
5336                 return FAILED;
5337
5338         for_each_hrrq(hrrq, ioa_cfg) {
5339                 spin_lock(&hrrq->_lock);
5340                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5341                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5342                                 ipr_cmd->done = ipr_scsi_eh_done;
5343                                 op_found = 1;
5344                                 break;
5345                         }
5346                 }
5347                 spin_unlock(&hrrq->_lock);
5348         }
5349
5350         if (!op_found)
5351                 return SUCCESS;
5352
5353         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5354         ipr_cmd->ioarcb.res_handle = res->res_handle;
5355         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5356         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5357         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5358         ipr_cmd->u.sdev = scsi_cmd->device;
5359
5360         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5361                     scsi_cmd->cmnd[0]);
5362         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5363         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5364
5365         /*
5366          * If the abort task timed out and we sent a bus reset, we will get
5367          * one of the following responses to the abort
5368          */
5369         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5370                 ioasc = 0;
5371                 ipr_trace;
5372         }
5373
5374         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5375         if (!ipr_is_naca_model(res))
5376                 res->needs_sync_complete = 1;
5377
5378         LEAVE;
5379         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5380 }
5381
5382 /**
5383  * ipr_scan_finished - Report whether the device scan is complete
5384  * @shost:      scsi host struct
5385  * @elapsed_time:       elapsed time of the scan, in jiffies
5386  * Return value:
5387  *      0 if scan in progress / 1 if scan is complete
5388  **/
5389 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5390 {
5391         unsigned long lock_flags;
5392         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5393         int rc = 0;
5394
5395         spin_lock_irqsave(shost->host_lock, lock_flags);
5396         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5397                 rc = 1;
5398         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5399                 rc = 1;
5400         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5401         return rc;
5402 }
5403
5404 /**
5405  * ipr_eh_abort - Abort a single op
5406  * @scsi_cmd:   scsi command struct
5407  *
5408  * Return value:
5409  *      SUCCESS / FAILED
5410  **/
5411 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5412 {
5413         unsigned long flags;
5414         int rc;
5415         struct ipr_ioa_cfg *ioa_cfg;
5416
5417         ENTER;
5418
5419         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5420
5421         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5422         rc = ipr_cancel_op(scsi_cmd);
5423         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5424
5425         if (rc == SUCCESS)
5426                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5427         LEAVE;
5428         return rc;
5429 }
5430
5431 /**
5432  * ipr_handle_other_interrupt - Handle "other" interrupts
5433  * @ioa_cfg:    ioa config struct
5434  * @int_reg:    interrupt register
5435  *
5436  * Return value:
5437  *      IRQ_NONE / IRQ_HANDLED
5438  **/
5439 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5440                                               u32 int_reg)
5441 {
5442         irqreturn_t rc = IRQ_HANDLED;
5443         u32 int_mask_reg;
5444
5445         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5446         int_reg &= ~int_mask_reg;
5447
5448         /* If an interrupt did not occur on the adapter, ignore it,
5449          * unless this is SIS64, in which case check for a stage change interrupt.
5450          */
5451         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5452                 if (ioa_cfg->sis64) {
5453                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5454                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5455                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5456
5457                                 /* clear stage change */
5458                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5459                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5460                                 list_del(&ioa_cfg->reset_cmd->queue);
5461                                 del_timer(&ioa_cfg->reset_cmd->timer);
5462                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5463                                 return IRQ_HANDLED;
5464                         }
5465                 }
5466
5467                 return IRQ_NONE;
5468         }
5469
5470         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5471                 /* Mask the interrupt */
5472                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5473                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5474
5475                 list_del(&ioa_cfg->reset_cmd->queue);
5476                 del_timer(&ioa_cfg->reset_cmd->timer);
5477                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5478         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5479                 if (ioa_cfg->clear_isr) {
5480                         if (ipr_debug && printk_ratelimit())
5481                                 dev_err(&ioa_cfg->pdev->dev,
5482                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5483                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5484                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5485                         return IRQ_NONE;
5486                 }
5487         } else {
5488                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5489                         ioa_cfg->ioa_unit_checked = 1;
5490                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5491                         dev_err(&ioa_cfg->pdev->dev,
5492                                 "No Host RRQ. 0x%08X\n", int_reg);
5493                 else
5494                         dev_err(&ioa_cfg->pdev->dev,
5495                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5496
5497                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5498                         ioa_cfg->sdt_state = GET_DUMP;
5499
5500                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5501                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5502         }
5503
5504         return rc;
5505 }
5506
5507 /**
5508  * ipr_isr_eh - Interrupt service routine error handler
5509  * @ioa_cfg:    ioa config struct
5510  * @msg:        message to log
5511  *
5512  * @number:     number to append to the logged message
5513  *      none
5514  **/
5515 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5516 {
5517         ioa_cfg->errors_logged++;
5518         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5519
5520         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5521                 ioa_cfg->sdt_state = GET_DUMP;
5522
5523         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5524 }
5525
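/**
 * ipr_process_hrrq - Process responses from an HRR queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list head on which to collect completed commands
 *
 * Walks the host request/response queue while the toggle bit of the
 * current entry matches the queue's toggle bit, moving each completed
 * command onto @doneq. When the queue wraps, the toggle bit is flipped
 * so that stale entries from the previous pass are not re-processed.
 *
 * Return value:
 *      number of responses processed
 **/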
5526 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5527                                                 struct list_head *doneq)
5528 {
5529         u32 ioasc;
5530         u16 cmd_index;
5531         struct ipr_cmnd *ipr_cmd;
5532         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5533         int num_hrrq = 0;
5534
5535         /* If interrupts are disabled, ignore the interrupt */
5536         if (!hrr_queue->allow_interrupts)
5537                 return 0;
5538
5539         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5540                hrr_queue->toggle_bit) {
5541
5542                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5543                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5544                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5545
5546                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5547                              cmd_index < hrr_queue->min_cmd_id)) {
5548                         ipr_isr_eh(ioa_cfg,
5549                                 "Invalid response handle from IOA:",
5550                                 cmd_index);
5551                         break;
5552                 }
5553
5554                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5555                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5556
5557                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5558
5559                 list_move_tail(&ipr_cmd->queue, doneq);
5560
5561                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5562                         hrr_queue->hrrq_curr++;
5563                 } else {
5564                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5565                         hrr_queue->toggle_bit ^= 1u;
5566                 }
5567                 num_hrrq++;
5568                 if (budget > 0 && num_hrrq >= budget)
5569                         break;
5570         }
5571
5572         return num_hrrq;
5573 }
5574
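/*
 * Illustrative sketch, kept out of the build: the producer/consumer
 * handshake that ipr_process_hrrq() above depends on. The adapter posts
 * response words into the circular HRRQ and inverts the sense of
 * IPR_HRRQ_TOGGLE_BIT each time it wraps, so the host can distinguish
 * fresh entries from stale ones without a shared producer index. Names
 * prefixed "demo_" are hypothetical and exist only for this example.
 */
#if 0
static int demo_consume_hrrq(__be32 *q, unsigned int entries,
			     void (*complete)(u32 handle))
{
	unsigned int cur = 0;
	u32 toggle = 1;		/* a freshly initialized queue expects 1 */
	int consumed = 0;

	while ((be32_to_cpu(q[cur]) & IPR_HRRQ_TOGGLE_BIT) == toggle) {
		/* Entry is valid: recover the response handle */
		complete((be32_to_cpu(q[cur]) &
			  IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			  IPR_HRRQ_REQ_RESP_HANDLE_SHIFT);
		consumed++;

		if (++cur == entries) {		/* wrap: flip expectation */
			cur = 0;
			toggle ^= 1;
		}
	}
	return consumed;
}
#endif
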
5575 static int ipr_iopoll(struct irq_poll *iop, int budget)
5576 {
5577         struct ipr_ioa_cfg *ioa_cfg;
5578         struct ipr_hrr_queue *hrrq;
5579         struct ipr_cmnd *ipr_cmd, *temp;
5580         unsigned long hrrq_flags;
5581         int completed_ops;
5582         LIST_HEAD(doneq);
5583
5584         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5585         ioa_cfg = hrrq->ioa_cfg;
5586
5587         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5588         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5589
5590         if (completed_ops < budget)
5591                 irq_poll_complete(iop);
5592         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5593
5594         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5595                 list_del(&ipr_cmd->queue);
5596                 del_timer(&ipr_cmd->timer);
5597                 ipr_cmd->fast_done(ipr_cmd);
5598         }
5599
5600         return completed_ops;
5601 }
5602
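/*
 * For context: ipr_iopoll() above is this driver's irq_poll callback.
 * Elsewhere in this file each HRR queue registers it roughly as in the
 * hypothetical fragment below (kept out of the build), so that under
 * sustained load completions are batched from softirq context rather
 * than being processed one interrupt at a time.
 */
#if 0
	irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
		      ioa_cfg->iopoll_weight, ipr_iopoll);
#endif
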
5603 /**
5604  * ipr_isr - Interrupt service routine
5605  * @irq:        irq number
5606  * @devp:       pointer to hrr queue struct
5607  *
5608  * Return value:
5609  *      IRQ_NONE / IRQ_HANDLED
5610  **/
5611 static irqreturn_t ipr_isr(int irq, void *devp)
5612 {
5613         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5614         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5615         unsigned long hrrq_flags = 0;
5616         u32 int_reg = 0;
5617         int num_hrrq = 0;
5618         int irq_none = 0;
5619         struct ipr_cmnd *ipr_cmd, *temp;
5620         irqreturn_t rc = IRQ_NONE;
5621         LIST_HEAD(doneq);
5622
5623         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5624         /* If interrupts are disabled, ignore the interrupt */
5625         if (!hrrq->allow_interrupts) {
5626                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5627                 return IRQ_NONE;
5628         }
5629
5630         while (1) {
5631                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5632                         rc =  IRQ_HANDLED;
5633
5634                         if (!ioa_cfg->clear_isr)
5635                                 break;
5636
5637                         /* Clear the PCI interrupt */
5638                         num_hrrq = 0;
5639                         do {
5640                                 writel(IPR_PCII_HRRQ_UPDATED,
5641                                      ioa_cfg->regs.clr_interrupt_reg32);
5642                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5643                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5644                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5645
5646                 } else if (rc == IRQ_NONE && irq_none == 0) {
5647                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5648                         irq_none++;
5649                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5650                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5651                         ipr_isr_eh(ioa_cfg,
5652                                 "Error clearing HRRQ: ", num_hrrq);
5653                         rc = IRQ_HANDLED;
5654                         break;
5655                 } else
5656                         break;
5657         }
5658
5659         if (unlikely(rc == IRQ_NONE))
5660                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5661
5662         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5663         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5664                 list_del(&ipr_cmd->queue);
5665                 del_timer(&ipr_cmd->timer);
5666                 ipr_cmd->fast_done(ipr_cmd);
5667         }
5668         return rc;
5669 }
5670
5671 /**
5672  * ipr_isr_mhrrq - Interrupt service routine for secondary HRR queues
5673  * @irq:        irq number
5674  * @devp:       pointer to hrr queue struct
5675  *
5676  * Return value:
5677  *      IRQ_NONE / IRQ_HANDLED
5678  **/
5679 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5680 {
5681         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5682         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5683         unsigned long hrrq_flags = 0;
5684         struct ipr_cmnd *ipr_cmd, *temp;
5685         irqreturn_t rc = IRQ_NONE;
5686         LIST_HEAD(doneq);
5687
5688         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5689
5690         /* If interrupts are disabled, ignore the interrupt */
5691         if (!hrrq->allow_interrupts) {
5692                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5693                 return IRQ_NONE;
5694         }
5695
5696         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5697                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5698                        hrrq->toggle_bit) {
5699                         irq_poll_sched(&hrrq->iopoll);
5700                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5701                         return IRQ_HANDLED;
5702                 }
5703         } else {
5704                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5705                         hrrq->toggle_bit)
5706
5707                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5708                                 rc =  IRQ_HANDLED;
5709         }
5710
5711         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5712
5713         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5714                 list_del(&ipr_cmd->queue);
5715                 del_timer(&ipr_cmd->timer);
5716                 ipr_cmd->fast_done(ipr_cmd);
5717         }
5718         return rc;
5719 }
5720
5721 /**
5722  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5723  * @ioa_cfg:    ioa config struct
5724  * @ipr_cmd:    ipr command struct
5725  *
5726  * Return value:
5727  *      0 on success / -1 on failure
5728  **/
5729 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5730                              struct ipr_cmnd *ipr_cmd)
5731 {
5732         int i, nseg;
5733         struct scatterlist *sg;
5734         u32 length;
5735         u32 ioadl_flags = 0;
5736         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5737         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5738         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5739
5740         length = scsi_bufflen(scsi_cmd);
5741         if (!length)
5742                 return 0;
5743
5744         nseg = scsi_dma_map(scsi_cmd);
5745         if (nseg < 0) {
5746                 if (printk_ratelimit())
5747                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5748                 return -1;
5749         }
5750
5751         ipr_cmd->dma_use_sg = nseg;
5752
5753         ioarcb->data_transfer_length = cpu_to_be32(length);
5754         ioarcb->ioadl_len =
5755                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5756
5757         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5758                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5759                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5760         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5761                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5762
5763         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5764                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5765                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5766                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5767         }
5768
5769         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5770         return 0;
5771 }
5772
5773 /**
5774  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5775  * @ioa_cfg:    ioa config struct
5776  * @ipr_cmd:    ipr command struct
5777  *
5778  * Return value:
5779  *      0 on success / -1 on failure
5780  **/
5781 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5782                            struct ipr_cmnd *ipr_cmd)
5783 {
5784         int i, nseg;
5785         struct scatterlist *sg;
5786         u32 length;
5787         u32 ioadl_flags = 0;
5788         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5789         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5790         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5791
5792         length = scsi_bufflen(scsi_cmd);
5793         if (!length)
5794                 return 0;
5795
5796         nseg = scsi_dma_map(scsi_cmd);
5797         if (nseg < 0) {
5798                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5799                 return -1;
5800         }
5801
5802         ipr_cmd->dma_use_sg = nseg;
5803
5804         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5805                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5806                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5807                 ioarcb->data_transfer_length = cpu_to_be32(length);
5808                 ioarcb->ioadl_len =
5809                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5810         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5811                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5812                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5813                 ioarcb->read_ioadl_len =
5814                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5815         }
5816
5817         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5818                 ioadl = ioarcb->u.add_data.u.ioadl;
5819                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5820                                     offsetof(struct ipr_ioarcb, u.add_data));
5821                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5822         }
5823
5824         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5825                 ioadl[i].flags_and_data_len =
5826                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5827                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5828         }
5829
5830         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5831         return 0;
5832 }
5833
5834 /**
5835  * ipr_erp_done - Process completion of ERP for a device
5836  * @ipr_cmd:            ipr command struct
5837  *
5838  * This function copies the sense buffer into the scsi_cmd
5839  * struct and pushes the scsi_done function.
5840  *
5841  * Return value:
5842  *      nothing
5843  **/
5844 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5845 {
5846         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5847         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5848         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5849
5850         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5851                 scsi_cmd->result |= (DID_ERROR << 16);
5852                 scmd_printk(KERN_ERR, scsi_cmd,
5853                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5854         } else {
5855                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5856                        SCSI_SENSE_BUFFERSIZE);
5857         }
5858
5859         if (res) {
5860                 if (!ipr_is_naca_model(res))
5861                         res->needs_sync_complete = 1;
5862                 res->in_erp = 0;
5863         }
5864         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5865         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5866         scsi_cmd->scsi_done(scsi_cmd);
5867 }
5868
5869 /**
5870  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5871  * @ipr_cmd:    ipr command struct
5872  *
5873  * Return value:
5874  *      none
5875  **/
5876 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5877 {
5878         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5879         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5880         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5881
5882         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5883         ioarcb->data_transfer_length = 0;
5884         ioarcb->read_data_transfer_length = 0;
5885         ioarcb->ioadl_len = 0;
5886         ioarcb->read_ioadl_len = 0;
5887         ioasa->hdr.ioasc = 0;
5888         ioasa->hdr.residual_data_len = 0;
5889
5890         if (ipr_cmd->ioa_cfg->sis64)
5891                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5892                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5893         else {
5894                 ioarcb->write_ioadl_addr =
5895                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5896                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5897         }
5898 }
5899
5900 /**
5901  * ipr_erp_request_sense - Send request sense to a device
5902  * @ipr_cmd:    ipr command struct
5903  *
5904  * This function sends a request sense to a device as a result
5905  * of a check condition.
5906  *
5907  * Return value:
5908  *      nothing
5909  **/
5910 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5911 {
5912         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5913         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5914
5915         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5916                 ipr_erp_done(ipr_cmd);
5917                 return;
5918         }
5919
5920         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5921
5922         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5923         cmd_pkt->cdb[0] = REQUEST_SENSE;
5924         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5925         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5926         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5927         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5928
5929         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5930                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5931
5932         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5933                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5934 }
5935
5936 /**
5937  * ipr_erp_cancel_all - Send cancel all to a device
5938  * @ipr_cmd:    ipr command struct
5939  *
5940  * This function sends a cancel all to a device to clear the
5941  * queue. If we are running TCQ on the device, QERR is set to 1,
5942  * which means all outstanding ops have been dropped on the floor.
5943  * Cancel all will return them to us.
5944  *
5945  * Return value:
5946  *      nothing
5947  **/
5948 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5949 {
5950         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5951         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5952         struct ipr_cmd_pkt *cmd_pkt;
5953
5954         res->in_erp = 1;
5955
5956         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5957
5958         if (!scsi_cmd->device->simple_tags) {
5959                 ipr_erp_request_sense(ipr_cmd);
5960                 return;
5961         }
5962
5963         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5964         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5965         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5966
5967         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5968                    IPR_CANCEL_ALL_TIMEOUT);
5969 }
5970
5971 /**
5972  * ipr_dump_ioasa - Dump contents of IOASA
5973  * @ioa_cfg:    ioa config struct
5974  * @ipr_cmd:    ipr command struct
5975  * @res:                resource entry struct
5976  *
5977  * This function is invoked by the interrupt handler when ops
5978  * fail. It will log the IOASA if appropriate. Only called
5979  * for GPDD ops.
5980  *
5981  * Return value:
5982  *      none
5983  **/
5984 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5985                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5986 {
5987         int i;
5988         u16 data_len;
5989         u32 ioasc, fd_ioasc;
5990         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5991         __be32 *ioasa_data = (__be32 *)ioasa;
5992         int error_index;
5993
5994         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5995         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5996
5997         if (0 == ioasc)
5998                 return;
5999
6000         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6001                 return;
6002
6003         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6004                 error_index = ipr_get_error(fd_ioasc);
6005         else
6006                 error_index = ipr_get_error(ioasc);
6007
6008         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6009                 /* Don't log an error if the IOA already logged one */
6010                 if (ioasa->hdr.ilid != 0)
6011                         return;
6012
6013                 if (!ipr_is_gscsi(res))
6014                         return;
6015
6016                 if (ipr_error_table[error_index].log_ioasa == 0)
6017                         return;
6018         }
6019
6020         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6021
6022         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6023         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6024                 data_len = sizeof(struct ipr_ioasa64);
6025         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6026                 data_len = sizeof(struct ipr_ioasa);
6027
6028         ipr_err("IOASA Dump:\n");
6029
6030         for (i = 0; i < data_len / 4; i += 4) {
6031                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6032                         be32_to_cpu(ioasa_data[i]),
6033                         be32_to_cpu(ioasa_data[i+1]),
6034                         be32_to_cpu(ioasa_data[i+2]),
6035                         be32_to_cpu(ioasa_data[i+3]));
6036         }
6037 }
6038
6039 /**
6040  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6041  * @ipr_cmd:    ipr command struct
6043  *
6044  * Return value:
6045  *      none
6046  **/
6047 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6048 {
6049         u32 failing_lba;
6050         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6051         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6052         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6053         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6054
6055         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6056
6057         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6058                 return;
6059
6060         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6061
6062         if (ipr_is_vset_device(res) &&
6063             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6064             ioasa->u.vset.failing_lba_hi != 0) {
6065                 sense_buf[0] = 0x72;
6066                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6067                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6068                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6069
6070                 sense_buf[7] = 12;
6071                 sense_buf[8] = 0;
6072                 sense_buf[9] = 0x0A;
6073                 sense_buf[10] = 0x80;
6074
6075                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6076
6077                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6078                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6079                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6080                 sense_buf[15] = failing_lba & 0x000000ff;
6081
6082                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6083
6084                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6085                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6086                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6087                 sense_buf[19] = failing_lba & 0x000000ff;
6088         } else {
6089                 sense_buf[0] = 0x70;
6090                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6091                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6092                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6093
6094                 /* Illegal request */
6095                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6096                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6097                         sense_buf[7] = 10;      /* additional length */
6098
6099                         /* IOARCB was in error */
6100                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6101                                 sense_buf[15] = 0xC0;
6102                         else    /* Parameter data was invalid */
6103                                 sense_buf[15] = 0x80;
6104
6105                         sense_buf[16] =
6106                             ((IPR_FIELD_POINTER_MASK &
6107                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6108                         sense_buf[17] =
6109                             (IPR_FIELD_POINTER_MASK &
6110                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6111                 } else {
6112                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6113                                 if (ipr_is_vset_device(res))
6114                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6115                                 else
6116                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6117
6118                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6119                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6120                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6121                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6122                                 sense_buf[6] = failing_lba & 0x000000ff;
6123                         }
6124
6125                         sense_buf[7] = 6;       /* additional length */
6126                 }
6127         }
6128 }
6129
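/*
 * Illustrative sketch, kept out of the build: recovering the failing LBA
 * that ipr_gen_sense() above encodes. In fixed-format sense data (response
 * code 0x70) the Valid bit (0x80) in byte 0 indicates that the Information
 * field in bytes 3-6 carries the LBA; the descriptor format (0x72) built
 * for large vset LBAs carries all eight LBA bytes in an information
 * descriptor instead. The "demo_" name is hypothetical.
 */
#if 0
static u32 demo_fixed_sense_lba(const u8 *sense_buf)
{
	/* Require fixed format with a valid Information field */
	if ((sense_buf[0] & 0x7f) != 0x70 || !(sense_buf[0] & 0x80))
		return 0;

	return ((u32)sense_buf[3] << 24) | ((u32)sense_buf[4] << 16) |
	       ((u32)sense_buf[5] << 8) | sense_buf[6];
}
#endif
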
6130 /**
6131  * ipr_get_autosense - Copy autosense data to sense buffer
6132  * @ipr_cmd:    ipr command struct
6133  *
6134  * This function copies the autosense buffer to the buffer
6135  * in the scsi_cmd, if there is autosense available.
6136  *
6137  * Return value:
6138  *      1 if autosense was available / 0 if not
6139  **/
6140 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6141 {
6142         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6143         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6144
6145         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6146                 return 0;
6147
6148         if (ipr_cmd->ioa_cfg->sis64)
6149                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6150                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6151                            SCSI_SENSE_BUFFERSIZE));
6152         else
6153                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6154                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6155                            SCSI_SENSE_BUFFERSIZE));
6156         return 1;
6157 }
6158
6159 /**
6160  * ipr_erp_start - Process an error response for a SCSI op
6161  * @ioa_cfg:    ioa config struct
6162  * @ipr_cmd:    ipr command struct
6163  *
6164  * This function determines whether or not to initiate ERP
6165  * on the affected device.
6166  *
6167  * Return value:
6168  *      nothing
6169  **/
6170 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6171                               struct ipr_cmnd *ipr_cmd)
6172 {
6173         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6174         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6175         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6176         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6177
6178         if (!res) {
6179                 ipr_scsi_eh_done(ipr_cmd);
6180                 return;
6181         }
6182
6183         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6184                 ipr_gen_sense(ipr_cmd);
6185
6186         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6187
6188         switch (masked_ioasc) {
6189         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6190                 if (ipr_is_naca_model(res))
6191                         scsi_cmd->result |= (DID_ABORT << 16);
6192                 else
6193                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6194                 break;
6195         case IPR_IOASC_IR_RESOURCE_HANDLE:
6196         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6197                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6198                 break;
6199         case IPR_IOASC_HW_SEL_TIMEOUT:
6200                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6201                 if (!ipr_is_naca_model(res))
6202                         res->needs_sync_complete = 1;
6203                 break;
6204         case IPR_IOASC_SYNC_REQUIRED:
6205                 if (!res->in_erp)
6206                         res->needs_sync_complete = 1;
6207                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6208                 break;
6209         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6210         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6211                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6212                 break;
6213         case IPR_IOASC_BUS_WAS_RESET:
6214         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6215                 /*
6216                  * Report the bus reset and ask for a retry. The device
6217                  * will give CC/UA the next command.
6218                  */
6219                 if (!res->resetting_device)
6220                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6221                 scsi_cmd->result |= (DID_ERROR << 16);
6222                 if (!ipr_is_naca_model(res))
6223                         res->needs_sync_complete = 1;
6224                 break;
6225         case IPR_IOASC_HW_DEV_BUS_STATUS:
6226                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6227                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6228                         if (!ipr_get_autosense(ipr_cmd)) {
6229                                 if (!ipr_is_naca_model(res)) {
6230                                         ipr_erp_cancel_all(ipr_cmd);
6231                                         return;
6232                                 }
6233                         }
6234                 }
6235                 if (!ipr_is_naca_model(res))
6236                         res->needs_sync_complete = 1;
6237                 break;
6238         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6239                 break;
6240         case IPR_IOASC_IR_NON_OPTIMIZED:
6241                 if (res->raw_mode) {
6242                         res->raw_mode = 0;
6243                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6244                 } else
6245                         scsi_cmd->result |= (DID_ERROR << 16);
6246                 break;
6247         default:
6248                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6249                         scsi_cmd->result |= (DID_ERROR << 16);
6250                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6251                         res->needs_sync_complete = 1;
6252                 break;
6253         }
6254
6255         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6256         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6257         scsi_cmd->scsi_done(scsi_cmd);
6258 }
6259
6260 /**
6261  * ipr_scsi_done - mid-layer done function
6262  * @ipr_cmd:    ipr command struct
6263  *
6264  * This function is invoked by the interrupt handler for
6265  * ops generated by the SCSI mid-layer
6266  *
6267  * Return value:
6268  *      none
6269  **/
6270 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6271 {
6272         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6273         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6274         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6275         unsigned long lock_flags;
6276
6277         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6278
6279         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6280                 scsi_dma_unmap(scsi_cmd);
6281
6282                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6283                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6284                 scsi_cmd->scsi_done(scsi_cmd);
6285                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6286         } else {
6287                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6288                 spin_lock(&ipr_cmd->hrrq->_lock);
6289                 ipr_erp_start(ioa_cfg, ipr_cmd);
6290                 spin_unlock(&ipr_cmd->hrrq->_lock);
6291                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6292         }
6293 }
6294
6295 /**
6296  * ipr_queuecommand - Queue a mid-layer request
6297  * @shost:              scsi host struct
6298  * @scsi_cmd:   scsi command struct
6299  *
6300  * This function queues a request generated by the mid-layer.
6301  *
6302  * Return value:
6303  *      0 on success
6304  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6305  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6306  **/
6307 static int ipr_queuecommand(struct Scsi_Host *shost,
6308                             struct scsi_cmnd *scsi_cmd)
6309 {
6310         struct ipr_ioa_cfg *ioa_cfg;
6311         struct ipr_resource_entry *res;
6312         struct ipr_ioarcb *ioarcb;
6313         struct ipr_cmnd *ipr_cmd;
6314         unsigned long hrrq_flags, lock_flags;
6315         int rc;
6316         struct ipr_hrr_queue *hrrq;
6317         int hrrq_id;
6318
6319         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6320
6321         scsi_cmd->result = (DID_OK << 16);
6322         res = scsi_cmd->device->hostdata;
6323
6324         if (ipr_is_gata(res) && res->sata_port) {
6325                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6326                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6327                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6328                 return rc;
6329         }
6330
6331         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6332         hrrq = &ioa_cfg->hrrq[hrrq_id];
6333
6334         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6335         /*
6336          * We are currently blocking all devices due to a host reset.
6337          * We have told the host to stop giving us new requests, but
6338          * ERP ops don't count. FIXME
6339          */
6340         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6341                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6342                 return SCSI_MLQUEUE_HOST_BUSY;
6343         }
6344
6345         /*
6346          * FIXME - Create scsi_set_host_offline interface
6347          *  and the ioa_is_dead check can be removed
6348          */
6349         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6350                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6351                 goto err_nodev;
6352         }
6353
6354         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6355         if (ipr_cmd == NULL) {
6356                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6357                 return SCSI_MLQUEUE_HOST_BUSY;
6358         }
6359         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6360
6361         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6362         ioarcb = &ipr_cmd->ioarcb;
6363
6364         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6365         ipr_cmd->scsi_cmd = scsi_cmd;
6366         ipr_cmd->done = ipr_scsi_eh_done;
6367
6368         if (ipr_is_gscsi(res)) {
6369                 if (scsi_cmd->underflow == 0)
6370                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6371
6372                 if (res->reset_occurred) {
6373                         res->reset_occurred = 0;
6374                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6375                 }
6376         }
6377
6378         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6379                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6380
6381                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6382                 if (scsi_cmd->flags & SCMD_TAGGED)
6383                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6384                 else
6385                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6386         }
6387
6388         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6389             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6390                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6391         }
6392         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6393                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6394
6395                 if (scsi_cmd->underflow == 0)
6396                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6397         }
6398
6399         if (ioa_cfg->sis64)
6400                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6401         else
6402                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6403
6404         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6405         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6406                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6407                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6408                 if (!rc)
6409                         scsi_dma_unmap(scsi_cmd);
6410                 return SCSI_MLQUEUE_HOST_BUSY;
6411         }
6412
6413         if (unlikely(hrrq->ioa_is_dead)) {
6414                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6415                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6416                 scsi_dma_unmap(scsi_cmd);
6417                 goto err_nodev;
6418         }
6419
6420         ioarcb->res_handle = res->res_handle;
6421         if (res->needs_sync_complete) {
6422                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6423                 res->needs_sync_complete = 0;
6424         }
6425         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6426         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6427         ipr_send_command(ipr_cmd);
6428         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6429         return 0;
6430
6431 err_nodev:
6432         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6433         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6434         scsi_cmd->result = (DID_NO_CONNECT << 16);
6435         scsi_cmd->scsi_done(scsi_cmd);
6436         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6437         return 0;
6438 }
6439
6440 /**
6441  * ipr_ioctl - IOCTL handler
6442  * @sdev:       scsi device struct
6443  * @cmd:        IOCTL cmd
6444  * @arg:        IOCTL arg
6445  *
6446  * Return value:
6447  *      0 on success / other on failure
6448  **/
6449 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6450 {
6451         struct ipr_resource_entry *res;
6452
6453         res = (struct ipr_resource_entry *)sdev->hostdata;
6454         if (res && ipr_is_gata(res)) {
6455                 if (cmd == HDIO_GET_IDENTITY)
6456                         return -ENOTTY;
6457                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6458         }
6459
6460         return -EINVAL;
6461 }
6462
6463 /**
6464  * ipr_ioa_info - Get information about the card/driver
6465  * @host:       scsi host struct
6466  *
6467  * Return value:
6468  *      pointer to buffer with description string
6469  **/
6470 static const char *ipr_ioa_info(struct Scsi_Host *host)
6471 {
6472         static char buffer[512];
6473         struct ipr_ioa_cfg *ioa_cfg;
6474         unsigned long lock_flags = 0;
6475
6476         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6477
6478         spin_lock_irqsave(host->host_lock, lock_flags);
6479         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6480         spin_unlock_irqrestore(host->host_lock, lock_flags);
6481
6482         return buffer;
6483 }
6484
6485 static struct scsi_host_template driver_template = {
6486         .module = THIS_MODULE,
6487         .name = "IPR",
6488         .info = ipr_ioa_info,
6489         .ioctl = ipr_ioctl,
6490         .queuecommand = ipr_queuecommand,
6491         .eh_abort_handler = ipr_eh_abort,
6492         .eh_device_reset_handler = ipr_eh_dev_reset,
6493         .eh_host_reset_handler = ipr_eh_host_reset,
6494         .slave_alloc = ipr_slave_alloc,
6495         .slave_configure = ipr_slave_configure,
6496         .slave_destroy = ipr_slave_destroy,
6497         .scan_finished = ipr_scan_finished,
6498         .target_alloc = ipr_target_alloc,
6499         .target_destroy = ipr_target_destroy,
6500         .change_queue_depth = ipr_change_queue_depth,
6501         .bios_param = ipr_biosparam,
6502         .can_queue = IPR_MAX_COMMANDS,
6503         .this_id = -1,
6504         .sg_tablesize = IPR_MAX_SGLIST,
6505         .max_sectors = IPR_IOA_MAX_SECTORS,
6506         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6507         .use_clustering = ENABLE_CLUSTERING,
6508         .shost_attrs = ipr_ioa_attrs,
6509         .sdev_attrs = ipr_dev_attrs,
6510         .proc_name = IPR_NAME,
6511 };
6512
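/*
 * For reference, a host template like the one above is normally handed to
 * scsi_host_alloc() from the PCI probe path; the hypothetical fragment
 * below (kept out of the build, error handling trimmed) shows the usual
 * shape. The driver's real probe code appears later in this file.
 */
#if 0
	struct Scsi_Host *host;

	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!host)
		return -ENOMEM;
	rc = scsi_add_host(host, &pdev->dev);
#endif
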
6513 /**
6514  * ipr_ata_phy_reset - libata phy_reset handler
6515  * @ap:         ata port to reset
6516  *
6517  **/
6518 static void ipr_ata_phy_reset(struct ata_port *ap)
6519 {
6520         unsigned long flags;
6521         struct ipr_sata_port *sata_port = ap->private_data;
6522         struct ipr_resource_entry *res = sata_port->res;
6523         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6524         int rc;
6525
6526         ENTER;
6527         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6528         while (ioa_cfg->in_reset_reload) {
6529                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6530                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6531                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6532         }
6533
6534         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6535                 goto out_unlock;
6536
6537         rc = ipr_device_reset(ioa_cfg, res);
6538
6539         if (rc) {
6540                 ap->link.device[0].class = ATA_DEV_NONE;
6541                 goto out_unlock;
6542         }
6543
6544         ap->link.device[0].class = res->ata_class;
6545         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6546                 ap->link.device[0].class = ATA_DEV_NONE;
6547
6548 out_unlock:
6549         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6550         LEAVE;
6551 }
6552
6553 /**
6554  * ipr_ata_post_internal - Cleanup after an internal command
6555  * @qc: ATA queued command
6556  *
6557  * Return value:
6558  *      none
6559  **/
6560 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6561 {
6562         struct ipr_sata_port *sata_port = qc->ap->private_data;
6563         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6564         struct ipr_cmnd *ipr_cmd;
6565         struct ipr_hrr_queue *hrrq;
6566         unsigned long flags;
6567
6568         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6569         while (ioa_cfg->in_reset_reload) {
6570                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6571                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6572                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6573         }
6574
6575         for_each_hrrq(hrrq, ioa_cfg) {
6576                 spin_lock(&hrrq->_lock);
6577                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6578                         if (ipr_cmd->qc == qc) {
6579                                 ipr_device_reset(ioa_cfg, sata_port->res);
6580                                 break;
6581                         }
6582                 }
6583                 spin_unlock(&hrrq->_lock);
6584         }
6585         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6586 }
6587
6588 /**
6589  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6590  * @regs:       destination
6591  * @tf: source ATA taskfile
6592  *
6593  * Return value:
6594  *      none
6595  **/
6596 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6597                              struct ata_taskfile *tf)
6598 {
6599         regs->feature = tf->feature;
6600         regs->nsect = tf->nsect;
6601         regs->lbal = tf->lbal;
6602         regs->lbam = tf->lbam;
6603         regs->lbah = tf->lbah;
6604         regs->device = tf->device;
6605         regs->command = tf->command;
6606         regs->hob_feature = tf->hob_feature;
6607         regs->hob_nsect = tf->hob_nsect;
6608         regs->hob_lbal = tf->hob_lbal;
6609         regs->hob_lbam = tf->hob_lbam;
6610         regs->hob_lbah = tf->hob_lbah;
6611         regs->ctl = tf->ctl;
6612 }
6613
6614 /**
6615  * ipr_sata_done - done function for SATA commands
6616  * @ipr_cmd:    ipr command struct
6617  *
6618  * This function is invoked by the interrupt handler for
6619  * ops generated by the SCSI mid-layer to SATA devices
6620  *
6621  * Return value:
6622  *      none
6623  **/
6624 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6625 {
6626         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6627         struct ata_queued_cmd *qc = ipr_cmd->qc;
6628         struct ipr_sata_port *sata_port = qc->ap->private_data;
6629         struct ipr_resource_entry *res = sata_port->res;
6630         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6631
6632         spin_lock(&ipr_cmd->hrrq->_lock);
6633         if (ipr_cmd->ioa_cfg->sis64)
6634                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6635                        sizeof(struct ipr_ioasa_gata));
6636         else
6637                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6638                        sizeof(struct ipr_ioasa_gata));
6639         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6640
6641         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6642                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6643
6644         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6645                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6646         else
6647                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6648         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6649         spin_unlock(&ipr_cmd->hrrq->_lock);
6650         ata_qc_complete(qc);
6651 }
6652
6653 /**
6654  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6655  * @ipr_cmd:    ipr command struct
6656  * @qc:         ATA queued command
6657  *
6658  **/
6659 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6660                                   struct ata_queued_cmd *qc)
6661 {
6662         u32 ioadl_flags = 0;
6663         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6664         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6665         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6666         int len = qc->nbytes;
6667         struct scatterlist *sg;
6668         unsigned int si;
6669         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6670
6671         if (len == 0)
6672                 return;
6673
6674         if (qc->dma_dir == DMA_TO_DEVICE) {
6675                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6676                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6677         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6678                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6679
6680         ioarcb->data_transfer_length = cpu_to_be32(len);
6681         ioarcb->ioadl_len =
6682                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6683         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6684                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6685
6686         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6687                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6688                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6689                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6690
6691                 last_ioadl64 = ioadl64;
6692                 ioadl64++;
6693         }
6694
6695         if (likely(last_ioadl64))
6696                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6697 }
6698
6699 /**
6700  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6701  * @ipr_cmd:    ipr command struct
6702  * @qc:         ATA queued command
6703  *
6704  **/
6705 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6706                                 struct ata_queued_cmd *qc)
6707 {
6708         u32 ioadl_flags = 0;
6709         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6710         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6711         struct ipr_ioadl_desc *last_ioadl = NULL;
6712         int len = qc->nbytes;
6713         struct scatterlist *sg;
6714         unsigned int si;
6715
6716         if (len == 0)
6717                 return;
6718
6719         if (qc->dma_dir == DMA_TO_DEVICE) {
6720                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6721                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6722                 ioarcb->data_transfer_length = cpu_to_be32(len);
6723                 ioarcb->ioadl_len =
6724                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6725         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6726                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6727                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6728                 ioarcb->read_ioadl_len =
6729                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6730         }
6731
6732         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6733                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6734                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6735
6736                 last_ioadl = ioadl;
6737                 ioadl++;
6738         }
6739
6740         if (likely(last_ioadl))
6741                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6742 }
6743
6744 /**
6745  * ipr_qc_defer - Get a free ipr_cmd
6746  * @qc: queued command
6747  *
6748  * Return value:
6749  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6750  **/
6751 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6752 {
6753         struct ata_port *ap = qc->ap;
6754         struct ipr_sata_port *sata_port = ap->private_data;
6755         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6756         struct ipr_cmnd *ipr_cmd;
6757         struct ipr_hrr_queue *hrrq;
6758         int hrrq_id;
6759
6760         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6761         hrrq = &ioa_cfg->hrrq[hrrq_id];
6762
6763         qc->lldd_task = NULL;
6764         spin_lock(&hrrq->_lock);
6765         if (unlikely(hrrq->ioa_is_dead)) {
6766                 spin_unlock(&hrrq->_lock);
6767                 return 0;
6768         }
6769
6770         if (unlikely(!hrrq->allow_cmds)) {
6771                 spin_unlock(&hrrq->_lock);
6772                 return ATA_DEFER_LINK;
6773         }
6774
6775         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6776         if (ipr_cmd == NULL) {
6777                 spin_unlock(&hrrq->_lock);
6778                 return ATA_DEFER_LINK;
6779         }
6780
6781         qc->lldd_task = ipr_cmd;
6782         spin_unlock(&hrrq->_lock);
6783         return 0;
6784 }
6785
6786 /**
6787  * ipr_qc_issue - Issue a SATA qc to a device
6788  * @qc: queued command
6789  *
6790  * Return value:
6791  *      0 if success / AC_ERR_SYSTEM or AC_ERR_INVALID on failure
6792  **/
6793 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6794 {
6795         struct ata_port *ap = qc->ap;
6796         struct ipr_sata_port *sata_port = ap->private_data;
6797         struct ipr_resource_entry *res = sata_port->res;
6798         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6799         struct ipr_cmnd *ipr_cmd;
6800         struct ipr_ioarcb *ioarcb;
6801         struct ipr_ioarcb_ata_regs *regs;
6802
6803         if (qc->lldd_task == NULL)
6804                 ipr_qc_defer(qc);
6805
6806         ipr_cmd = qc->lldd_task;
6807         if (ipr_cmd == NULL)
6808                 return AC_ERR_SYSTEM;
6809
6810         qc->lldd_task = NULL;
6811         spin_lock(&ipr_cmd->hrrq->_lock);
6812         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6813                         ipr_cmd->hrrq->ioa_is_dead)) {
6814                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6815                 spin_unlock(&ipr_cmd->hrrq->_lock);
6816                 return AC_ERR_SYSTEM;
6817         }
6818
6819         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6820         ioarcb = &ipr_cmd->ioarcb;
6821
6822         if (ioa_cfg->sis64) {
6823                 regs = &ipr_cmd->i.ata_ioadl.regs;
6824                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6825         } else
6826                 regs = &ioarcb->u.add_data.u.regs;
6827
6828         memset(regs, 0, sizeof(*regs));
6829         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6830
6831         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6832         ipr_cmd->qc = qc;
6833         ipr_cmd->done = ipr_sata_done;
6834         ipr_cmd->ioarcb.res_handle = res->res_handle;
6835         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6836         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6837         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6838         ipr_cmd->dma_use_sg = qc->n_elem;
6839
6840         if (ioa_cfg->sis64)
6841                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6842         else
6843                 ipr_build_ata_ioadl(ipr_cmd, qc);
6844
6845         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6846         ipr_copy_sata_tf(regs, &qc->tf);
6847         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6848         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6849
6850         switch (qc->tf.protocol) {
6851         case ATA_PROT_NODATA:
6852         case ATA_PROT_PIO:
6853                 break;
6854
6855         case ATA_PROT_DMA:
6856                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6857                 break;
6858
6859         case ATAPI_PROT_PIO:
6860         case ATAPI_PROT_NODATA:
6861                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6862                 break;
6863
6864         case ATAPI_PROT_DMA:
6865                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6866                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6867                 break;
6868
6869         default:
6870                 WARN_ON(1);
6871                 spin_unlock(&ipr_cmd->hrrq->_lock);
6872                 return AC_ERR_INVALID;
6873         }
6874
6875         ipr_send_command(ipr_cmd);
6876         spin_unlock(&ipr_cmd->hrrq->_lock);
6877
6878         return 0;
6879 }
6880
6881 /**
6882  * ipr_qc_fill_rtf - Read result TF
6883  * @qc: ATA queued command
6884  *
6885  * Return value:
6886  *      true
6887  **/
6888 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6889 {
6890         struct ipr_sata_port *sata_port = qc->ap->private_data;
6891         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6892         struct ata_taskfile *tf = &qc->result_tf;
6893
6894         tf->feature = g->error;
6895         tf->nsect = g->nsect;
6896         tf->lbal = g->lbal;
6897         tf->lbam = g->lbam;
6898         tf->lbah = g->lbah;
6899         tf->device = g->device;
6900         tf->command = g->status;
6901         tf->hob_nsect = g->hob_nsect;
6902         tf->hob_lbal = g->hob_lbal;
6903         tf->hob_lbam = g->hob_lbam;
6904         tf->hob_lbah = g->hob_lbah;
6905
6906         return true;
6907 }
6908
6909 static struct ata_port_operations ipr_sata_ops = {
6910         .phy_reset = ipr_ata_phy_reset,
6911         .hardreset = ipr_sata_reset,
6912         .post_internal_cmd = ipr_ata_post_internal,
6913         .qc_prep = ata_noop_qc_prep,
6914         .qc_defer = ipr_qc_defer,
6915         .qc_issue = ipr_qc_issue,
6916         .qc_fill_rtf = ipr_qc_fill_rtf,
6917         .port_start = ata_sas_port_start,
6918         .port_stop = ata_sas_port_stop
6919 };
6920
6921 static struct ata_port_info sata_port_info = {
6922         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6923                           ATA_FLAG_SAS_HOST,
6924         .pio_mask       = ATA_PIO4_ONLY,
6925         .mwdma_mask     = ATA_MWDMA2,
6926         .udma_mask      = ATA_UDMA6,
6927         .port_ops       = &ipr_sata_ops
6928 };
6929
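/*
 * For context: elsewhere in this file each GATA resource is bound to a
 * libata port built from the port info above, roughly as in the
 * hypothetical fragment below (kept out of the build).
 */
#if 0
	sata_port->ap = ata_sas_port_alloc(&ioa_cfg->ata_host,
					   &sata_port_info, shost);
#endif
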
6930 #ifdef CONFIG_PPC_PSERIES
6931 static const u16 ipr_blocked_processors[] = {
6932         PVR_NORTHSTAR,
6933         PVR_PULSAR,
6934         PVR_POWER4,
6935         PVR_ICESTAR,
6936         PVR_SSTAR,
6937         PVR_POWER4p,
6938         PVR_630,
6939         PVR_630p
6940 };
6941
6942 /**
6943  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6944  * @ioa_cfg:    ioa cfg struct
6945  *
6946  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6947  * certain pSeries hardware. This function determines if the given
6948  * adapter is in one of these configurations or not.
6949  *
6950  * Return value:
6951  *      1 if adapter is not supported / 0 if adapter is supported
6952  **/
6953 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6954 {
6955         int i;
6956
6957         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6958                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6959                         if (pvr_version_is(ipr_blocked_processors[i]))
6960                                 return 1;
6961                 }
6962         }
6963         return 0;
6964 }
6965 #else
6966 #define ipr_invalid_adapter(ioa_cfg) 0
6967 #endif
6968
6969 /**
6970  * ipr_ioa_bringdown_done - IOA bring down completion.
6971  * @ipr_cmd:    ipr command struct
6972  *
6973  * This function processes the completion of an adapter bring down.
6974  * It wakes any reset sleepers.
6975  *
6976  * Return value:
6977  *      IPR_RC_JOB_RETURN
6978  **/
6979 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6980 {
6981         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6982         int i;
6983
6984         ENTER;
6985         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6986                 ipr_trace;
6987                 spin_unlock_irq(ioa_cfg->host->host_lock);
6988                 scsi_unblock_requests(ioa_cfg->host);
6989                 spin_lock_irq(ioa_cfg->host->host_lock);
6990         }
6991
6992         ioa_cfg->in_reset_reload = 0;
6993         ioa_cfg->reset_retries = 0;
6994         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6995                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6996                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6997                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6998         }
6999         wmb();
7000
7001         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7002         wake_up_all(&ioa_cfg->reset_wait_q);
7003         LEAVE;
7004
7005         return IPR_RC_JOB_RETURN;
7006 }
7007
7008 /**
7009  * ipr_ioa_reset_done - IOA reset completion.
7010  * @ipr_cmd:    ipr command struct
7011  *
7012  * This function processes the completion of an adapter reset.
7013  * It schedules any necessary mid-layer add/removes and
7014  * wakes any reset sleepers.
7015  *
7016  * Return value:
7017  *      IPR_RC_JOB_RETURN
7018  **/
7019 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7020 {
7021         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7022         struct ipr_resource_entry *res;
7023         struct ipr_hostrcb *hostrcb, *temp;
7024         int i = 0, j;
7025
7026         ENTER;
7027         ioa_cfg->in_reset_reload = 0;
7028         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7029                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7030                 ioa_cfg->hrrq[j].allow_cmds = 1;
7031                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7032         }
7033         wmb();
7034         ioa_cfg->reset_cmd = NULL;
7035         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7036
7037         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7038                 if (res->add_to_ml || res->del_from_ml) {
7039                         ipr_trace;
7040                         break;
7041                 }
7042         }
7043         schedule_work(&ioa_cfg->work_q);
7044
7045         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7046                 list_del(&hostrcb->queue);
7047                 if (i++ < IPR_NUM_LOG_HCAMS)
7048                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7049                 else
7050                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7051         }
7052
7053         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7054         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7055
7056         ioa_cfg->reset_retries = 0;
7057         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7058         wake_up_all(&ioa_cfg->reset_wait_q);
7059
7060         spin_unlock(ioa_cfg->host->host_lock);
7061         scsi_unblock_requests(ioa_cfg->host);
7062         spin_lock(ioa_cfg->host->host_lock);
7063
7064         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7065                 scsi_block_requests(ioa_cfg->host);
7066
7067         schedule_work(&ioa_cfg->work_q);
7068         LEAVE;
7069         return IPR_RC_JOB_RETURN;
7070 }
7071
7072 /**
7073  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7074  * @supported_dev:      supported device struct
7075  * @vpids:                      vendor product id struct
7076  *
7077  * Return value:
7078  *      none
7079  **/
7080 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7081                                  struct ipr_std_inq_vpids *vpids)
7082 {
7083         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7084         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7085         supported_dev->num_records = 1;
7086         supported_dev->data_length =
7087                 cpu_to_be16(sizeof(struct ipr_supported_device));
7088         supported_dev->reserved = 0;
7089 }
7090
7091 /**
7092  * ipr_set_supported_devs - Send Set Supported Devices for a device
7093  * @ipr_cmd:    ipr command struct
7094  *
7095  * This function sends a Set Supported Devices to the adapter
7096  *
7097  * Return value:
7098  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7099  **/
7100 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7101 {
7102         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7103         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7104         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7105         struct ipr_resource_entry *res = ipr_cmd->u.res;
7106
7107         ipr_cmd->job_step = ipr_ioa_reset_done;
7108
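             /*
              * One Set Supported Devices is issued per SCSI disk.  On
              * non-SIS64 adapters job_step is pointed back at this
              * function after each request, so every completion
              * re-enters here and the list_for_each_entry_continue()
              * cursor (ipr_cmd->u.res) advances to the next disk.
              */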
7109         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7110                 if (!ipr_is_scsi_disk(res))
7111                         continue;
7112
7113                 ipr_cmd->u.res = res;
7114                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7115
7116                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7117                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7118                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7119
7120                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7121                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7122                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7123                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7124
7125                 ipr_init_ioadl(ipr_cmd,
7126                                ioa_cfg->vpd_cbs_dma +
7127                                  offsetof(struct ipr_misc_cbs, supp_dev),
7128                                sizeof(struct ipr_supported_device),
7129                                IPR_IOADL_FLAGS_WRITE_LAST);
7130
7131                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7132                            IPR_SET_SUP_DEVICE_TIMEOUT);
7133
7134                 if (!ioa_cfg->sis64)
7135                         ipr_cmd->job_step = ipr_set_supported_devs;
7136                 LEAVE;
7137                 return IPR_RC_JOB_RETURN;
7138         }
7139
7140         LEAVE;
7141         return IPR_RC_JOB_CONTINUE;
7142 }
7143
7144 /**
7145  * ipr_get_mode_page - Locate specified mode page
7146  * @mode_pages: mode page buffer
7147  * @page_code:  page code to find
7148  * @len:                minimum required length for mode page
7149  *
7150  * Return value:
7151  *      pointer to mode page / NULL on failure
7152  **/
7153 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7154                                u32 page_code, u32 len)
7155 {
7156         struct ipr_mode_page_hdr *mode_hdr;
7157         u32 page_length;
7158         u32 length;
7159
7160         if (!mode_pages || (mode_pages->hdr.length == 0))
7161                 return NULL;
7162
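             /*
              * hdr.length excludes the length byte itself; +1 gives the
              * full mode data size.  Subtracting the 4-byte mode
              * parameter header and any block descriptors leaves the
              * combined length of the mode pages.
              */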
7163         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7164         mode_hdr = (struct ipr_mode_page_hdr *)
7165                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7166
7167         while (length) {
7168                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7169                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7170                                 return mode_hdr;
7171                         break;
7172                 } else {
7173                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7174                                        mode_hdr->page_length);
7175                         length -= page_length;
7176                         mode_hdr = (struct ipr_mode_page_hdr *)
7177                                 ((unsigned long)mode_hdr + page_length);
7178                 }
7179         }
7180         return NULL;
7181 }
7182
7183 /**
7184  * ipr_check_term_power - Check for term power errors
7185  * @ioa_cfg:    ioa config struct
7186  * @mode_pages: IOAFP mode pages buffer
7187  *
7188  * Check the IOAFP's mode page 28 for term power errors
7189  *
7190  * Return value:
7191  *      nothing
7192  **/
7193 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7194                                  struct ipr_mode_pages *mode_pages)
7195 {
7196         int i;
7197         int entry_length;
7198         struct ipr_dev_bus_entry *bus;
7199         struct ipr_mode_page28 *mode_page;
7200
7201         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7202                                       sizeof(struct ipr_mode_page28));
7203
             if (!mode_page)        /* page 28 absent; avoid a NULL dereference */
                     return;

7204         entry_length = mode_page->entry_length;
7205
7206         bus = mode_page->bus;
7207
7208         for (i = 0; i < mode_page->num_entries; i++) {
7209                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7210                         dev_err(&ioa_cfg->pdev->dev,
7211                                 "Term power is absent on scsi bus %d\n",
7212                                 bus->res_addr.bus);
7213                 }
7214
7215                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7216         }
7217 }
7218
7219 /**
7220  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7221  * @ioa_cfg:    ioa config struct
7222  *
7223  * Looks through the config table checking for SES devices. If
7224  * an SES device appears in the SES table with a maximum SCSI
7225  * bus speed, the bus speed is limited accordingly.
7226  *
7227  * Return value:
7228  *      none
7229  **/
7230 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7231 {
7232         u32 max_xfer_rate;
7233         int i;
7234
7235         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7236                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7237                                                        ioa_cfg->bus_attr[i].bus_width);
7238
7239                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7240                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7241         }
7242 }
7243
7244 /**
7245  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7246  * @ioa_cfg:    ioa config struct
7247  * @mode_pages: mode page 28 buffer
7248  *
7249  * Updates mode page 28 based on driver configuration
7250  *
7251  * Return value:
7252  *      none
7253  **/
7254 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7255                                           struct ipr_mode_pages *mode_pages)
7256 {
7257         int i, entry_length;
7258         struct ipr_dev_bus_entry *bus;
7259         struct ipr_bus_attributes *bus_attr;
7260         struct ipr_mode_page28 *mode_page;
7261
7262         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7263                                       sizeof(struct ipr_mode_page28));
7264
             if (!mode_page)        /* page 28 absent; nothing to update */
                     return;

7265         entry_length = mode_page->entry_length;
7266
7267         /* Loop for each device bus entry */
7268         for (i = 0, bus = mode_page->bus;
7269              i < mode_page->num_entries;
7270              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7271                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7272                         dev_err(&ioa_cfg->pdev->dev,
7273                                 "Invalid resource address reported: 0x%08X\n",
7274                                 IPR_GET_PHYS_LOC(bus->res_addr));
7275                         continue;
7276                 }
7277
7278                 bus_attr = &ioa_cfg->bus_attr[i];
7279                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7280                 bus->bus_width = bus_attr->bus_width;
7281                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7282                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7283                 if (bus_attr->qas_enabled)
7284                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7285                 else
7286                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7287         }
7288 }
7289
7290 /**
7291  * ipr_build_mode_select - Build a mode select command
7292  * @ipr_cmd:    ipr command struct
7293  * @res_handle: resource handle to send command to
7294  * @parm:               Byte 1 of Mode Select command
7295  * @dma_addr:   DMA buffer address
7296  * @xfer_len:   data transfer length
7297  *
7298  * Return value:
7299  *      none
7300  **/
7301 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7302                                   __be32 res_handle, u8 parm,
7303                                   dma_addr_t dma_addr, u8 xfer_len)
7304 {
7305         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7306
7307         ioarcb->res_handle = res_handle;
7308         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7309         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7310         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7311         ioarcb->cmd_pkt.cdb[1] = parm;
7312         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7313
7314         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7315 }
7316
7317 /**
7318  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7319  * @ipr_cmd:    ipr command struct
7320  *
7321  * This function sets up the SCSI bus attributes and sends
7322  * a Mode Select for Page 28 to activate them.
7323  *
7324  * Return value:
7325  *      IPR_RC_JOB_RETURN
7326  **/
7327 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7328 {
7329         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7330         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7331         int length;
7332
7333         ENTER;
7334         ipr_scsi_bus_speed_limit(ioa_cfg);
7335         ipr_check_term_power(ioa_cfg, mode_pages);
7336         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
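             /*
              * hdr.length + 1 is the full parameter list to send back.
              * The mode data length field is reserved for MODE SELECT,
              * so clear it before issuing the command.
              */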
7337         length = mode_pages->hdr.length + 1;
7338         mode_pages->hdr.length = 0;
7339
7340         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7341                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7342                               length);
7343
7344         ipr_cmd->job_step = ipr_set_supported_devs;
7345         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7346                                     struct ipr_resource_entry, queue);
7347         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7348
7349         LEAVE;
7350         return IPR_RC_JOB_RETURN;
7351 }
7352
7353 /**
7354  * ipr_build_mode_sense - Builds a mode sense command
7355  * @ipr_cmd:    ipr command struct
7356  * @res_handle:         resource handle to send command to
7357  * @parm:               Byte 2 of mode sense command
7358  * @dma_addr:   DMA address of mode sense buffer
7359  * @xfer_len:   Size of DMA buffer
7360  *
7361  * Return value:
7362  *      none
7363  **/
7364 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7365                                  __be32 res_handle,
7366                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7367 {
7368         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7369
7370         ioarcb->res_handle = res_handle;
7371         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7372         ioarcb->cmd_pkt.cdb[2] = parm;
7373         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7374         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7375
7376         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7377 }
7378
7379 /**
7380  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7381  * @ipr_cmd:    ipr command struct
7382  *
7383  * This function handles the failure of an IOA bringup command.
7384  *
7385  * Return value:
7386  *      IPR_RC_JOB_RETURN
7387  **/
7388 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7389 {
7390         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7391         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7392
7393         dev_err(&ioa_cfg->pdev->dev,
7394                 "0x%02X failed with IOASC: 0x%08X\n",
7395                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7396
7397         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7398         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7399         return IPR_RC_JOB_RETURN;
7400 }
7401
7402 /**
7403  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7404  * @ipr_cmd:    ipr command struct
7405  *
7406  * This function handles the failure of a Mode Sense to the IOAFP.
7407  * Some adapters do not handle all mode pages.
7408  *
7409  * Return value:
7410  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7411  **/
7412 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7413 {
7414         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7415         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7416
7417         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7418                 ipr_cmd->job_step = ipr_set_supported_devs;
7419                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7420                                             struct ipr_resource_entry, queue);
7421                 return IPR_RC_JOB_CONTINUE;
7422         }
7423
7424         return ipr_reset_cmd_failed(ipr_cmd);
7425 }
7426
7427 /**
7428  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7429  * @ipr_cmd:    ipr command struct
7430  *
7431  * This function sends a Page 28 mode sense to the IOA to
7432  * retrieve SCSI bus attributes.
7433  *
7434  * Return value:
7435  *      IPR_RC_JOB_RETURN
7436  **/
7437 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7438 {
7439         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7440
7441         ENTER;
7442         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7443                              0x28, ioa_cfg->vpd_cbs_dma +
7444                              offsetof(struct ipr_misc_cbs, mode_pages),
7445                              sizeof(struct ipr_mode_pages));
7446
7447         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7448         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7449
7450         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7451
7452         LEAVE;
7453         return IPR_RC_JOB_RETURN;
7454 }
7455
7456 /**
7457  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7458  * @ipr_cmd:    ipr command struct
7459  *
7460  * This function enables dual IOA RAID support if possible.
7461  *
7462  * Return value:
7463  *      IPR_RC_JOB_RETURN
7464  **/
7465 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7466 {
7467         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7468         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7469         struct ipr_mode_page24 *mode_page;
7470         int length;
7471
7472         ENTER;
7473         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7474                                       sizeof(struct ipr_mode_page24));
7475
7476         if (mode_page)
7477                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7478
7479         length = mode_pages->hdr.length + 1;
7480         mode_pages->hdr.length = 0;
7481
7482         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7483                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7484                               length);
7485
7486         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7487         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7488
7489         LEAVE;
7490         return IPR_RC_JOB_RETURN;
7491 }
7492
7493 /**
7494  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7495  * @ipr_cmd:    ipr command struct
7496  *
7497  * This function handles the failure of a Mode Sense to the IOAFP.
7498  * Some adapters do not handle all mode pages.
7499  *
7500  * Return value:
7501  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7502  **/
7503 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7504 {
7505         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7506
7507         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7508                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7509                 return IPR_RC_JOB_CONTINUE;
7510         }
7511
7512         return ipr_reset_cmd_failed(ipr_cmd);
7513 }
7514
7515 /**
7516  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7517  * @ipr_cmd:    ipr command struct
7518  *
7519  * This function sends a mode sense to the IOA to retrieve
7520  * the IOA Advanced Function Control mode page.
7521  *
7522  * Return value:
7523  *      IPR_RC_JOB_RETURN
7524  **/
7525 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7526 {
7527         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7528
7529         ENTER;
7530         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7531                              0x24, ioa_cfg->vpd_cbs_dma +
7532                              offsetof(struct ipr_misc_cbs, mode_pages),
7533                              sizeof(struct ipr_mode_pages));
7534
7535         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7536         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7537
7538         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7539
7540         LEAVE;
7541         return IPR_RC_JOB_RETURN;
7542 }
7543
7544 /**
7545  * ipr_init_res_table - Initialize the resource table
7546  * @ipr_cmd:    ipr command struct
7547  *
7548  * This function looks through the existing resource table, comparing
7549  * it with the config table. This function will take care of old/new
7550  * devices and schedule adding/removing them from the mid-layer
7551  * as appropriate.
7552  *
7553  * Return value:
7554  *      IPR_RC_JOB_CONTINUE
7555  **/
7556 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7557 {
7558         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7559         struct ipr_resource_entry *res, *temp;
7560         struct ipr_config_table_entry_wrapper cfgtew;
7561         int entries, found, flag, i;
7562         LIST_HEAD(old_res);
7563
7564         ENTER;
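             /*
              * Reconciliation happens in three passes: park all known
              * resources on old_res, match config table entries back to
              * used_res_q (flagging new devices with add_to_ml), then
              * mark leftovers that have an sdev for mid-layer removal
              * and return the rest to free_res_q.
              */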
7565         if (ioa_cfg->sis64)
7566                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7567         else
7568                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7569
7570         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7571                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7572
7573         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7574                 list_move_tail(&res->queue, &old_res);
7575
7576         if (ioa_cfg->sis64)
7577                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7578         else
7579                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7580
7581         for (i = 0; i < entries; i++) {
7582                 if (ioa_cfg->sis64)
7583                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7584                 else
7585                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7586                 found = 0;
7587
7588                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7589                         if (ipr_is_same_device(res, &cfgtew)) {
7590                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7591                                 found = 1;
7592                                 break;
7593                         }
7594                 }
7595
7596                 if (!found) {
7597                         if (list_empty(&ioa_cfg->free_res_q)) {
7598                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7599                                 break;
7600                         }
7601
7602                         found = 1;
7603                         res = list_entry(ioa_cfg->free_res_q.next,
7604                                          struct ipr_resource_entry, queue);
7605                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7606                         ipr_init_res_entry(res, &cfgtew);
7607                         res->add_to_ml = 1;
7608                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7609                         res->sdev->allow_restart = 1;
7610
7611                 if (found)
7612                         ipr_update_res_entry(res, &cfgtew);
7613         }
7614
7615         list_for_each_entry_safe(res, temp, &old_res, queue) {
7616                 if (res->sdev) {
7617                         res->del_from_ml = 1;
7618                         res->res_handle = IPR_INVALID_RES_HANDLE;
7619                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7620                 }
7621         }
7622
7623         list_for_each_entry_safe(res, temp, &old_res, queue) {
7624                 ipr_clear_res_target(res);
7625                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7626         }
7627
7628         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7629                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7630         else
7631                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7632
7633         LEAVE;
7634         return IPR_RC_JOB_CONTINUE;
7635 }
7636
7637 /**
7638  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7639  * @ipr_cmd:    ipr command struct
7640  *
7641  * This function sends a Query IOA Configuration command
7642  * to the adapter to retrieve the IOA configuration table.
7643  *
7644  * Return value:
7645  *      IPR_RC_JOB_RETURN
7646  **/
7647 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7648 {
7649         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7650         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7651         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7652         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7653
7654         ENTER;
7655         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7656                 ioa_cfg->dual_raid = 1;
7657         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7658                  ucode_vpd->major_release, ucode_vpd->card_type,
7659                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7660         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7661         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7662
7663         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7664         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7665         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7666         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7667
7668         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7669                        IPR_IOADL_FLAGS_READ_LAST);
7670
7671         ipr_cmd->job_step = ipr_init_res_table;
7672
7673         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7674
7675         LEAVE;
7676         return IPR_RC_JOB_RETURN;
7677 }
7678
7679 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7680 {
7681         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7682
7683         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7684                 return IPR_RC_JOB_CONTINUE;
7685
7686         return ipr_reset_cmd_failed(ipr_cmd);
7687 }
7688
7689 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7690                                          __be32 res_handle, u8 sa_code)
7691 {
7692         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7693
7694         ioarcb->res_handle = res_handle;
7695         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7696         ioarcb->cmd_pkt.cdb[1] = sa_code;
7697         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7698 }
7699
7700 /**
7701  * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
7702  * @ipr_cmd:    ipr command struct
7703  *
7704  * Return value:
7705  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7706  **/
7707 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7708 {
7709         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7710         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7711         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7712
7713         ENTER;
7714
7715         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7716
7717         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7718                 ipr_build_ioa_service_action(ipr_cmd,
7719                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7720                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7721
7722                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7723
7724                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7725                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7726                            IPR_SET_SUP_DEVICE_TIMEOUT);
7727
7728                 LEAVE;
7729                 return IPR_RC_JOB_RETURN;
7730         }
7731
7732         LEAVE;
7733         return IPR_RC_JOB_CONTINUE;
7734 }
7735
7736 /**
7737  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7738  * @ipr_cmd:    ipr command struct
7739  *
7740  * This utility function sends an inquiry to the adapter.
7741  *
7742  * Return value:
7743  *      none
7744  **/
7745 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7746                               dma_addr_t dma_addr, u8 xfer_len)
7747 {
7748         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7749
7750         ENTER;
7751         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7752         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7753
7754         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7755         ioarcb->cmd_pkt.cdb[1] = flags;
7756         ioarcb->cmd_pkt.cdb[2] = page;
7757         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7758
7759         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7760
7761         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7762         LEAVE;
7763 }
7764
7765 /**
7766  * ipr_inquiry_page_supported - Is the given inquiry page supported
7767  * @page0:              inquiry page 0 buffer
7768  * @page:               page code.
7769  *
7770  * This function determines if the specified inquiry page is supported.
7771  *
7772  * Return value:
7773  *      1 if page is supported / 0 if not
7774  **/
7775 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7776 {
7777         int i;
7778
7779         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7780                 if (page0->page[i] == page)
7781                         return 1;
7782
7783         return 0;
7784 }
7785
7786 /**
7787  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7788  * @ipr_cmd:    ipr command struct
7789  *
7790  * This function sends a Page 0xC4 inquiry to the adapter
7791  * to retrieve software VPD information.
7792  *
7793  * Return value:
7794  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7795  **/
7796 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7797 {
7798         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7799         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7800         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7801
7802         ENTER;
7803         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7804         memset(pageC4, 0, sizeof(*pageC4));
7805
7806         if (ipr_inquiry_page_supported(page0, 0xC4)) {
7807                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7808                                   (ioa_cfg->vpd_cbs_dma
7809                                    + offsetof(struct ipr_misc_cbs,
7810                                               pageC4_data)),
7811                                   sizeof(struct ipr_inquiry_pageC4));
7812                 return IPR_RC_JOB_RETURN;
7813         }
7814
7815         LEAVE;
7816         return IPR_RC_JOB_CONTINUE;
7817 }
7818
7819 /**
7820  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7821  * @ipr_cmd:    ipr command struct
7822  *
7823  * This function sends a Page 0xD0 inquiry to the adapter
7824  * to retrieve adapter capabilities.
7825  *
7826  * Return value:
7827  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7828  **/
7829 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7830 {
7831         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7832         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7833         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7834
7835         ENTER;
7836         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7837         memset(cap, 0, sizeof(*cap));
7838
7839         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7840                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7841                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7842                                   sizeof(struct ipr_inquiry_cap));
7843                 return IPR_RC_JOB_RETURN;
7844         }
7845
7846         LEAVE;
7847         return IPR_RC_JOB_CONTINUE;
7848 }
7849
7850 /**
7851  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7852  * @ipr_cmd:    ipr command struct
7853  *
7854  * This function sends a Page 3 inquiry to the adapter
7855  * to retrieve software VPD information.
7856  *
7857  * Return value:
7858  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7859  **/
7860 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7861 {
7862         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7863
7864         ENTER;
7865
7866         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7867
7868         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7869                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7870                           sizeof(struct ipr_inquiry_page3));
7871
7872         LEAVE;
7873         return IPR_RC_JOB_RETURN;
7874 }
7875
7876 /**
7877  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7878  * @ipr_cmd:    ipr command struct
7879  *
7880  * This function sends a Page 0 inquiry to the adapter
7881  * to retrieve supported inquiry pages.
7882  *
7883  * Return value:
7884  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7885  **/
7886 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7887 {
7888         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7889         char type[5];
7890
7891         ENTER;
7892
7893         /* Grab the type out of the VPD and store it away */
7894         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7895         type[4] = '\0';
7896         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7897
7898         if (ipr_invalid_adapter(ioa_cfg)) {
7899                 dev_err(&ioa_cfg->pdev->dev,
7900                         "Adapter not supported in this hardware configuration.\n");
7901
7902                 if (!ipr_testmode) {
7903                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7904                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7905                         list_add_tail(&ipr_cmd->queue,
7906                                         &ioa_cfg->hrrq->hrrq_free_q);
7907                         return IPR_RC_JOB_RETURN;
7908                 }
7909         }
7910
7911         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7912
7913         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7914                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7915                           sizeof(struct ipr_inquiry_page0));
7916
7917         LEAVE;
7918         return IPR_RC_JOB_RETURN;
7919 }
7920
7921 /**
7922  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7923  * @ipr_cmd:    ipr command struct
7924  *
7925  * This function sends a standard inquiry to the adapter.
7926  *
7927  * Return value:
7928  *      IPR_RC_JOB_RETURN
7929  **/
7930 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7931 {
7932         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7933
7934         ENTER;
7935         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7936
7937         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7938                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7939                           sizeof(struct ipr_ioa_vpd));
7940
7941         LEAVE;
7942         return IPR_RC_JOB_RETURN;
7943 }
7944
7945 /**
7946  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7947  * @ipr_cmd:    ipr command struct
7948  *
7949  * This function sends an Identify Host Request Response Queue
7950  * command to establish the HRRQ with the adapter.
7951  *
7952  * Return value:
7953  *      IPR_RC_JOB_RETURN
7954  **/
7955 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7956 {
7957         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7958         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7959         struct ipr_hrr_queue *hrrq;
7960
7961         ENTER;
7962         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7963         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7964
7965         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7966                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7967
7968                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7969                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7970
7971                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7972                 if (ioa_cfg->sis64)
7973                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7974
7975                 if (ioa_cfg->nvectors == 1)
7976                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7977                 else
7978                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7979
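                     /*
                      * The low 32 bits of the HRRQ DMA address go in
                      * cdb[2..5], MSB first, and cdb[7..8] carry the
                      * queue size in bytes; on SIS-64 adapters the high
                      * 32 bits follow in cdb[10..13] below.
                      */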
7980                 ioarcb->cmd_pkt.cdb[2] =
7981                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7982                 ioarcb->cmd_pkt.cdb[3] =
7983                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7984                 ioarcb->cmd_pkt.cdb[4] =
7985                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7986                 ioarcb->cmd_pkt.cdb[5] =
7987                         ((u64) hrrq->host_rrq_dma) & 0xff;
7988                 ioarcb->cmd_pkt.cdb[7] =
7989                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7990                 ioarcb->cmd_pkt.cdb[8] =
7991                         (sizeof(u32) * hrrq->size) & 0xff;
7992
7993                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7994                         ioarcb->cmd_pkt.cdb[9] =
7995                                         ioa_cfg->identify_hrrq_index;
7996
7997                 if (ioa_cfg->sis64) {
7998                         ioarcb->cmd_pkt.cdb[10] =
7999                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8000                         ioarcb->cmd_pkt.cdb[11] =
8001                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8002                         ioarcb->cmd_pkt.cdb[12] =
8003                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8004                         ioarcb->cmd_pkt.cdb[13] =
8005                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8006                 }
8007
8008                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8009                         ioarcb->cmd_pkt.cdb[14] =
8010                                         ioa_cfg->identify_hrrq_index;
8011
8012                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8013                            IPR_INTERNAL_TIMEOUT);
8014
8015                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8016                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8017
8018                 LEAVE;
8019                 return IPR_RC_JOB_RETURN;
8020         }
8021
8022         LEAVE;
8023         return IPR_RC_JOB_CONTINUE;
8024 }
8025
8026 /**
8027  * ipr_reset_timer_done - Adapter reset timer function
8028  * @ipr_cmd:    ipr command struct
8029  *
8030  * Description: This function is used in adapter reset processing
8031  * for timing events. If the reset_cmd pointer in the IOA
8032  * config struct no longer points to this command, we are doing nested
8033  * resets and fail_all_ops will take care of freeing the
8034  * command block.
8035  *
8036  * Return value:
8037  *      none
8038  **/
8039 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8040 {
8041         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8042         unsigned long lock_flags = 0;
8043
8044         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8045
8046         if (ioa_cfg->reset_cmd == ipr_cmd) {
8047                 list_del(&ipr_cmd->queue);
8048                 ipr_cmd->done(ipr_cmd);
8049         }
8050
8051         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8052 }
8053
8054 /**
8055  * ipr_reset_start_timer - Start a timer for adapter reset job
8056  * @ipr_cmd:    ipr command struct
8057  * @timeout:    timeout value
8058  *
8059  * Description: This function is used in adapter reset processing
8060  * for timing events. If the reset_cmd pointer in the IOA
8061  * config struct no longer points to this command, we are doing nested
8062  * resets and fail_all_ops will take care of freeing the
8063  * command block.
8064  *
8065  * Return value:
8066  *      none
8067  **/
8068 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8069                                   unsigned long timeout)
8070 {
8071
8072         ENTER;
8073         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8074         ipr_cmd->done = ipr_reset_ioa_job;
8075
8076         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8077         ipr_cmd->timer.expires = jiffies + timeout;
8078         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8079         add_timer(&ipr_cmd->timer);
8080 }
8081
8082 /**
8083  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8084  * @ioa_cfg:    ioa cfg struct
8085  *
8086  * Return value:
8087  *      nothing
8088  **/
8089 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8090 {
8091         struct ipr_hrr_queue *hrrq;
8092
8093         for_each_hrrq(hrrq, ioa_cfg) {
8094                 spin_lock(&hrrq->_lock);
8095                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8096
8097                 /* Initialize Host RRQ pointers */
8098                 hrrq->hrrq_start = hrrq->host_rrq;
8099                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8100                 hrrq->hrrq_curr = hrrq->hrrq_start;
8101                 hrrq->toggle_bit = 1;
8102                 spin_unlock(&hrrq->_lock);
8103         }
8104         wmb();
8105
8106         ioa_cfg->identify_hrrq_index = 0;
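             /*
              * With multiple HRRQs the round-robin index starts at 1;
              * queue 0 is presumably reserved for adapter commands in
              * that configuration.
              */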
8107         if (ioa_cfg->hrrq_num == 1)
8108                 atomic_set(&ioa_cfg->hrrq_index, 0);
8109         else
8110                 atomic_set(&ioa_cfg->hrrq_index, 1);
8111
8112         /* Zero out config table */
8113         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8114 }
8115
8116 /**
8117  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8118  * @ipr_cmd:    ipr command struct
8119  *
8120  * Return value:
8121  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8122  **/
8123 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8124 {
8125         unsigned long stage, stage_time;
8126         u32 feedback;
8127         volatile u32 int_reg;
8128         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8129         u64 maskval = 0;
8130
8131         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8132         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8133         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8134
8135         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8136
8137         /* sanity check the stage_time value */
8138         if (stage_time == 0)
8139                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8140         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8141                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8142         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8143                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8144
8145         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8146                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8147                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8148                 stage_time = ioa_cfg->transop_timeout;
8149                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8150         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8151                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8152                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8153                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
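                             /*
                              * Combine both 32-bit masks into one
                              * 64-bit value so a single writeq()
                              * masks the stage-change and trans-to-
                              * operational interrupts together.
                              */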
8154                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8155                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8156                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8157                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8158                         return IPR_RC_JOB_CONTINUE;
8159                 }
8160         }
8161
8162         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8163         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8164         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8165         ipr_cmd->done = ipr_reset_ioa_job;
8166         add_timer(&ipr_cmd->timer);
8167
8168         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8169
8170         return IPR_RC_JOB_RETURN;
8171 }
8172
8173 /**
8174  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8175  * @ipr_cmd:    ipr command struct
8176  *
8177  * This function reinitializes some control blocks and
8178  * enables destructive diagnostics on the adapter.
8179  *
8180  * Return value:
8181  *      IPR_RC_JOB_RETURN
8182  **/
8183 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8184 {
8185         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8186         volatile u32 int_reg;
8187         volatile u64 maskval;
8188         int i;
8189
8190         ENTER;
8191         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8192         ipr_init_ioa_mem(ioa_cfg);
8193
8194         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8195                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8196                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8197                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8198         }
8199         wmb();
8200         if (ioa_cfg->sis64) {
8201                 /* Set the adapter to the correct endian mode. */
8202                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8203                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8204         }
8205
8206         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8207
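             /*
              * If the adapter is already transitioning to operational
              * state, just unmask the error and HRRQ interrupts and
              * fall through to the Identify HRRQ step set above.
              */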
8208         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8209                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8210                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8211                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8212                 return IPR_RC_JOB_CONTINUE;
8213         }
8214
8215         /* Enable destructive diagnostics on IOA */
8216         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8217
8218         if (ioa_cfg->sis64) {
8219                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8220                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8221                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8222         } else
8223                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8224
8225         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8226
8227         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8228
8229         if (ioa_cfg->sis64) {
8230                 ipr_cmd->job_step = ipr_reset_next_stage;
8231                 return IPR_RC_JOB_CONTINUE;
8232         }
8233
8234         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8235         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8236         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8237         ipr_cmd->done = ipr_reset_ioa_job;
8238         add_timer(&ipr_cmd->timer);
8239         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8240
8241         LEAVE;
8242         return IPR_RC_JOB_RETURN;
8243 }
8244
8245 /**
8246  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8247  * @ipr_cmd:    ipr command struct
8248  *
8249  * This function is invoked when an adapter dump has run out
8250  * of processing time.
8251  *
8252  * Return value:
8253  *      IPR_RC_JOB_CONTINUE
8254  **/
8255 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8256 {
8257         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8258
8259         if (ioa_cfg->sdt_state == GET_DUMP)
8260                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8261         else if (ioa_cfg->sdt_state == READ_DUMP)
8262                 ioa_cfg->sdt_state = ABORT_DUMP;
8263
8264         ioa_cfg->dump_timeout = 1;
8265         ipr_cmd->job_step = ipr_reset_alert;
8266
8267         return IPR_RC_JOB_CONTINUE;
8268 }
8269
8270 /**
8271  * ipr_unit_check_no_data - Log a unit check/no data error log
8272  * @ioa_cfg:            ioa config struct
8273  *
8274  * Logs an error indicating the adapter unit checked, but for some
8275  * reason, we were unable to fetch the unit check buffer.
8276  *
8277  * Return value:
8278  *      nothing
8279  **/
8280 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8281 {
8282         ioa_cfg->errors_logged++;
8283         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8284 }
8285
8286 /**
8287  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8288  * @ioa_cfg:            ioa config struct
8289  *
8290  * Fetches the unit check buffer from the adapter by clocking the data
8291  * through the mailbox register.
8292  *
8293  * Return value:
8294  *      nothing
8295  **/
8296 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8297 {
8298         unsigned long mailbox;
8299         struct ipr_hostrcb *hostrcb;
8300         struct ipr_uc_sdt sdt;
8301         int rc, length;
8302         u32 ioasc;
8303
8304         mailbox = readl(ioa_cfg->ioa_mailbox);
8305
8306         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8307                 ipr_unit_check_no_data(ioa_cfg);
8308                 return;
8309         }
8310
8311         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8312         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8313                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8314
8315         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8316             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8317             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8318                 ipr_unit_check_no_data(ioa_cfg);
8319                 return;
8320         }
8321
8322         /* Find length of the first sdt entry (UC buffer) */
8323         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8324                 length = be32_to_cpu(sdt.entry[0].end_token);
8325         else
8326                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8327                           be32_to_cpu(sdt.entry[0].start_token)) &
8328                           IPR_FMT2_MBX_ADDR_MASK;
8329
8330         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8331                              struct ipr_hostrcb, queue);
8332         list_del(&hostrcb->queue);
8333         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8334
8335         rc = ipr_get_ldump_data_section(ioa_cfg,
8336                                         be32_to_cpu(sdt.entry[0].start_token),
8337                                         (__be32 *)&hostrcb->hcam,
8338                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8339
8340         if (!rc) {
8341                 ipr_handle_log_data(ioa_cfg, hostrcb);
8342                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8343                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8344                     ioa_cfg->sdt_state == GET_DUMP)
8345                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8346         } else
8347                 ipr_unit_check_no_data(ioa_cfg);
8348
8349         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8350 }
8351
8352 /**
8353  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8354  * @ipr_cmd:    ipr command struct
8355  *
8356  * Description: This function will call to get the unit check buffer.
8357  *
8358  * Return value:
8359  *      IPR_RC_JOB_RETURN
8360  **/
8361 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8362 {
8363         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8364
8365         ENTER;
8366         ioa_cfg->ioa_unit_checked = 0;
8367         ipr_get_unit_check_buffer(ioa_cfg);
8368         ipr_cmd->job_step = ipr_reset_alert;
8369         ipr_reset_start_timer(ipr_cmd, 0);
8370
8371         LEAVE;
8372         return IPR_RC_JOB_RETURN;
8373 }
8374
8375 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8376 {
8377         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8378
8379         ENTER;
8380
8381         if (ioa_cfg->sdt_state != GET_DUMP)
8382                 return IPR_RC_JOB_RETURN;
8383
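             /*
              * Proceed when the mailbox is usable: non-SIS64 adapters
              * have no mailbox-stable bit, and SIS-64 adapters poll in
              * IPR_CHECK_FOR_RESET_TIMEOUT steps until the stable bit
              * is set or u.time_left expires.
              */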
8384         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8385             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8386              IPR_PCII_MAILBOX_STABLE)) {
8387
8388                 if (!ipr_cmd->u.time_left)
8389                         dev_err(&ioa_cfg->pdev->dev,
8390                                 "Timed out waiting for Mailbox register.\n");
8391
8392                 ioa_cfg->sdt_state = READ_DUMP;
8393                 ioa_cfg->dump_timeout = 0;
8394                 if (ioa_cfg->sis64)
8395                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8396                 else
8397                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8398                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8399                 schedule_work(&ioa_cfg->work_q);
8400
8401         } else {
8402                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8403                 ipr_reset_start_timer(ipr_cmd,
8404                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8405         }
8406
8407         LEAVE;
8408         return IPR_RC_JOB_RETURN;
8409 }
8410
8411 /**
8412  * ipr_reset_restore_cfg_space - Restore PCI config space.
8413  * @ipr_cmd:    ipr command struct
8414  *
8415  * Description: This function restores the saved PCI config space of
8416  * the adapter, fails all outstanding ops back to the callers, and
8417  * fetches the dump/unit check if applicable to this reset.
8418  *
8419  * Return value:
8420  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8421  **/
8422 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8423 {
8424         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8425         u32 int_reg;
8426
8427         ENTER;
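             /*
              * state_saved is forced so pci_restore_state() will replay
              * the config space captured at probe time even if the PCI
              * core has since consumed the saved state.
              */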
8428         ioa_cfg->pdev->state_saved = true;
8429         pci_restore_state(ioa_cfg->pdev);
8430
8431         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8432                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8433                 return IPR_RC_JOB_CONTINUE;
8434         }
8435
8436         ipr_fail_all_ops(ioa_cfg);
8437
8438         if (ioa_cfg->sis64) {
8439                 /* Set the adapter to the correct endian mode. */
8440                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8441                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8442         }
8443
8444         if (ioa_cfg->ioa_unit_checked) {
8445                 if (ioa_cfg->sis64) {
8446                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8447                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8448                         return IPR_RC_JOB_RETURN;
8449                 } else {
8450                         ioa_cfg->ioa_unit_checked = 0;
8451                         ipr_get_unit_check_buffer(ioa_cfg);
8452                         ipr_cmd->job_step = ipr_reset_alert;
8453                         ipr_reset_start_timer(ipr_cmd, 0);
8454                         return IPR_RC_JOB_RETURN;
8455                 }
8456         }
8457
8458         if (ioa_cfg->in_ioa_bringdown) {
8459                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8460         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8461                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8462                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8463         } else {
8464                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8465         }
8466
8467         LEAVE;
8468         return IPR_RC_JOB_CONTINUE;
8469 }
8470
8471 /**
8472  * ipr_reset_bist_done - BIST has completed on the adapter.
8473  * @ipr_cmd:    ipr command struct
8474  *
8475  * Description: Unblock config space and resume the reset process.
8476  *
8477  * Return value:
8478  *      IPR_RC_JOB_CONTINUE
8479  **/
8480 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8481 {
8482         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8483
8484         ENTER;
8485         if (ioa_cfg->cfg_locked)
8486                 pci_cfg_access_unlock(ioa_cfg->pdev);
8487         ioa_cfg->cfg_locked = 0;
8488         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8489         LEAVE;
8490         return IPR_RC_JOB_CONTINUE;
8491 }
8492
8493 /**
8494  * ipr_reset_start_bist - Run BIST on the adapter.
8495  * @ipr_cmd:    ipr command struct
8496  *
8497  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8498  *
8499  * Return value:
8500  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8501  **/
8502 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8503 {
8504         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8505         int rc = PCIBIOS_SUCCESSFUL;
8506
8507         ENTER;
8508         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8509                 writel(IPR_UPROCI_SIS64_START_BIST,
8510                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8511         else
8512                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8513
8514         if (rc == PCIBIOS_SUCCESSFUL) {
8515                 ipr_cmd->job_step = ipr_reset_bist_done;
8516                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8517                 rc = IPR_RC_JOB_RETURN;
8518         } else {
8519                 if (ioa_cfg->cfg_locked)
8520                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8521                 ioa_cfg->cfg_locked = 0;
8522                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8523                 rc = IPR_RC_JOB_CONTINUE;
8524         }
8525
8526         LEAVE;
8527         return rc;
8528 }
8529
8530 /**
8531  * ipr_reset_slot_reset_done - PCI slot reset has completed
8532  * @ipr_cmd:    ipr command struct
8533  *
8534  * Description: This waits two seconds after the slot reset before unblocking config space.
8535  *
8536  * Return value:
8537  *      IPR_RC_JOB_RETURN
8538  **/
8539 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8540 {
8541         ENTER;
8542         ipr_cmd->job_step = ipr_reset_bist_done;
8543         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8544         LEAVE;
8545         return IPR_RC_JOB_RETURN;
8546 }
8547
8548 /**
8549  * ipr_reset_reset_work - Pulse a PCIe warm reset
8550  * @work:       work struct
8551  *
8552  * Description: This pulses a warm reset to the slot and resumes the reset job.
8553  *
8554  **/
8555 static void ipr_reset_reset_work(struct work_struct *work)
8556 {
8557         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8558         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8559         struct pci_dev *pdev = ioa_cfg->pdev;
8560         unsigned long lock_flags = 0;
8561
8562         ENTER;
8563         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8564         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8565         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8566
8567         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8568         if (ioa_cfg->reset_cmd == ipr_cmd)
8569                 ipr_reset_ioa_job(ipr_cmd);
8570         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8571         LEAVE;
8572 }
8573
8574 /**
8575  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8576  * @ipr_cmd:    ipr command struct
8577  *
8578  * Description: This asserts PCI reset to the adapter.
8579  *
8580  * Return value:
8581  *      IPR_RC_JOB_RETURN
8582  **/
8583 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8584 {
8585         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8586
8587         ENTER;
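        /*
         * Pulsing the warm reset sleeps, so it cannot be done from this
         * job step, which runs under the host lock; hand it off to the
         * reset work queue instead.
         */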
8588         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8589         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8590         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8591         LEAVE;
8592         return IPR_RC_JOB_RETURN;
8593 }
8594
8595 /**
8596  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8597  * @ipr_cmd:    ipr command struct
8598  *
8599  * Description: This attempts to block config access to the IOA.
8600  *
8601  * Return value:
8602  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8603  **/
8604 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8605 {
8606         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8607         int rc = IPR_RC_JOB_CONTINUE;
8608
8609         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8610                 ioa_cfg->cfg_locked = 1;
8611                 ipr_cmd->job_step = ioa_cfg->reset;
8612         } else {
8613                 if (ipr_cmd->u.time_left) {
8614                         rc = IPR_RC_JOB_RETURN;
8615                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8616                         ipr_reset_start_timer(ipr_cmd,
8617                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8618                 } else {
8619                         ipr_cmd->job_step = ioa_cfg->reset;
8620                         dev_err(&ioa_cfg->pdev->dev,
8621                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8622                 }
8623         }
8624
8625         return rc;
8626 }
8627
8628 /**
8629  * ipr_reset_block_config_access - Block config access to the IOA
8630  * @ipr_cmd:    ipr command struct
8631  *
8632  * Description: This attempts to block config access to the IOA.
8633  *
8634  * Return value:
8635  *      IPR_RC_JOB_CONTINUE
8636  **/
8637 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8638 {
8639         ipr_cmd->ioa_cfg->cfg_locked = 0;
8640         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8641         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8642         return IPR_RC_JOB_CONTINUE;
8643 }
8644
8645 /**
8646  * ipr_reset_allowed - Query whether or not IOA can be reset
8647  * @ioa_cfg:    ioa config struct
8648  *
8649  * Return value:
8650  *      0 if reset not allowed / non-zero if reset is allowed
8651  **/
8652 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8653 {
8654         volatile u32 temp_reg;
8655
8656         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8657         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8658 }
8659
8660 /**
8661  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8662  * @ipr_cmd:    ipr command struct
8663  *
8664  * Description: This function waits for adapter permission to run BIST,
8665  * then runs it. If permission is not granted within a reasonable
8666  * time, the adapter is reset anyway. The risk of resetting the
8667  * adapter without warning it is the loss of the persistent error
8668  * log on the adapter: if the adapter is reset while it is writing
8669  * to its flash, the affected flash segment will have bad ECC and
8670  * will be zeroed.
8671  *
8672  * Return value:
8673  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8674  **/
8675 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8676 {
8677         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8678         int rc = IPR_RC_JOB_RETURN;
8679
8680         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8681                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8682                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8683         } else {
8684                 ipr_cmd->job_step = ipr_reset_block_config_access;
8685                 rc = IPR_RC_JOB_CONTINUE;
8686         }
8687
8688         return rc;
8689 }
8690
8691 /**
8692  * ipr_reset_alert - Alert the adapter of a pending reset
8693  * @ipr_cmd:    ipr command struct
8694  *
8695  * Description: This function alerts the adapter that it will be reset.
8696  * If memory space is not currently enabled, skip the alert and
8697  * proceed directly to blocking config access. The timer must always
8698  * be started so we guarantee we do not run BIST from ipr_isr.
8699  *
8700  * Return value:
8701  *      IPR_RC_JOB_RETURN
8702  **/
8703 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8704 {
8705         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8706         u16 cmd_reg;
8707         int rc;
8708
8709         ENTER;
8710         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8711
8712         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8713                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8714                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8715                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8716         } else {
8717                 ipr_cmd->job_step = ipr_reset_block_config_access;
8718         }
8719
8720         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8721         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8722
8723         LEAVE;
8724         return IPR_RC_JOB_RETURN;
8725 }
8726
8727 /**
8728  * ipr_reset_quiesce_done - Complete IOA disconnect
8729  * @ipr_cmd:    ipr command struct
8730  *
8731  * Description: Freeze the adapter to complete quiesce processing
8732  *
8733  * Return value:
8734  *      IPR_RC_JOB_CONTINUE
8735  **/
8736 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8737 {
8738         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8739
8740         ENTER;
8741         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8742         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8743         LEAVE;
8744         return IPR_RC_JOB_CONTINUE;
8745 }
8746
8747 /**
8748  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8749  * @ipr_cmd:    ipr command struct
8750  *
8751  * Description: If nothing is outstanding to the IOA, proceed with
8752  * the IOA disconnect. Otherwise, reset the IOA.
8753  *
8754  * Return value:
8755  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8756  **/
8757 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8758 {
8759         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8760         struct ipr_cmnd *loop_cmd;
8761         struct ipr_hrr_queue *hrrq;
8762         int rc = IPR_RC_JOB_CONTINUE;
8763         int count = 0;
8764
8765         ENTER;
8766         ipr_cmd->job_step = ipr_reset_quiesce_done;
8767
8768         for_each_hrrq(hrrq, ioa_cfg) {
8769                 spin_lock(&hrrq->_lock);
8770                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8771                         count++;
8772                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8773                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8774                         rc = IPR_RC_JOB_RETURN;
8775                         break;
8776                 }
8777                 spin_unlock(&hrrq->_lock);
8778
8779                 if (count)
8780                         break;
8781         }
8782
8783         LEAVE;
8784         return rc;
8785 }
8786
8787 /**
8788  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8789  * @ipr_cmd:    ipr command struct
8790  *
8791  * Description: Cancel any outstanding HCAMs to the IOA.
8792  *
8793  * Return value:
8794  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8795  **/
8796 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8797 {
8798         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8799         int rc = IPR_RC_JOB_CONTINUE;
8800         struct ipr_cmd_pkt *cmd_pkt;
8801         struct ipr_cmnd *hcam_cmd;
8802         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8803
8804         ENTER;
8805         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8806
8807         if (!hrrq->ioa_is_dead) {
8808                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8809                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8810                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8811                                         continue;
8812
8813                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8815                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8816                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8817                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8818                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
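                                /*
                                 * Encode the 64-bit address of the HCAM's
                                 * IOARCB into the CDB: bytes 2-5 carry the
                                 * low 32 bits and bytes 10-13 the high 32
                                 * bits, most significant byte first.
                                 */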
8819                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8820                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8821                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8822                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8823                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8824                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8825                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8826                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8827
8828                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8829                                            IPR_CANCEL_TIMEOUT);
8830
8831                                 rc = IPR_RC_JOB_RETURN;
8832                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8833                                 break;
8834                         }
8835                 }
8836         } else
8837                 ipr_cmd->job_step = ipr_reset_alert;
8838
8839         LEAVE;
8840         return rc;
8841 }
8842
8843 /**
8844  * ipr_reset_ucode_download_done - Microcode download completion
8845  * @ipr_cmd:    ipr command struct
8846  *
8847  * Description: This function unmaps the microcode download buffer.
8848  *
8849  * Return value:
8850  *      IPR_RC_JOB_CONTINUE
8851  **/
8852 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8853 {
8854         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8855         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8856
8857         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8858                      sglist->num_sg, DMA_TO_DEVICE);
8859
8860         ipr_cmd->job_step = ipr_reset_alert;
8861         return IPR_RC_JOB_CONTINUE;
8862 }
8863
8864 /**
8865  * ipr_reset_ucode_download - Download microcode to the adapter
8866  * @ipr_cmd:    ipr command struct
8867  *
8868  * Description: This function checks to see if there is microcode
8869  * to download to the adapter. If there is, a download is performed.
8870  *
8871  * Return value:
8872  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8873  **/
8874 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8875 {
8876         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8877         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8878
8879         ENTER;
8880         ipr_cmd->job_step = ipr_reset_alert;
8881
8882         if (!sglist)
8883                 return IPR_RC_JOB_CONTINUE;
8884
8885         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8886         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8887         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8888         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
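        /* WRITE BUFFER carries the 24-bit image length in CDB bytes 6-8, MSB first */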
8889         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8890         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8891         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8892
8893         if (ioa_cfg->sis64)
8894                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8895         else
8896                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8897         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8898
8899         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8900                    IPR_WRITE_BUFFER_TIMEOUT);
8901
8902         LEAVE;
8903         return IPR_RC_JOB_RETURN;
8904 }
8905
8906 /**
8907  * ipr_reset_shutdown_ioa - Shutdown the adapter
8908  * @ipr_cmd:    ipr command struct
8909  *
8910  * Description: This function issues an adapter shutdown of the
8911  * specified type to the specified adapter as part of the
8912  * adapter reset job.
8913  *
8914  * Return value:
8915  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8916  **/
8917 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8918 {
8919         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8920         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8921         unsigned long timeout;
8922         int rc = IPR_RC_JOB_CONTINUE;
8923
8924         ENTER;
8925         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8926                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8927         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8928                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8929                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8930                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8931                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8932                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8933
8934                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8935                         timeout = IPR_SHUTDOWN_TIMEOUT;
8936                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8937                         timeout = IPR_INTERNAL_TIMEOUT;
8938                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8939                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8940                 else
8941                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8942
8943                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8944
8945                 rc = IPR_RC_JOB_RETURN;
8946                 ipr_cmd->job_step = ipr_reset_ucode_download;
8947         } else
8948                 ipr_cmd->job_step = ipr_reset_alert;
8949
8950         LEAVE;
8951         return rc;
8952 }
8953
8954 /**
8955  * ipr_reset_ioa_job - Adapter reset job
8956  * @ipr_cmd:    ipr command struct
8957  *
8958  * Description: This function is the job router for the adapter reset job.
8959  *
8960  * Return value:
8961  *      none
8962  **/
8963 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8964 {
8965         u32 rc, ioasc;
8966         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8967
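        /*
         * Run job steps back to back as long as they return
         * IPR_RC_JOB_CONTINUE. A step returns IPR_RC_JOB_RETURN once it
         * has queued asynchronous work (a timer or adapter command) that
         * will re-enter this routine when it completes.
         */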
8968         do {
8969                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8970
8971                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8972                         /*
8973                          * We are doing nested adapter resets and this is
8974                          * not the current reset job.
8975                          */
8976                         list_add_tail(&ipr_cmd->queue,
8977                                         &ipr_cmd->hrrq->hrrq_free_q);
8978                         return;
8979                 }
8980
8981                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8982                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8983                         if (rc == IPR_RC_JOB_RETURN)
8984                                 return;
8985                 }
8986
8987                 ipr_reinit_ipr_cmnd(ipr_cmd);
8988                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8989                 rc = ipr_cmd->job_step(ipr_cmd);
8990         } while (rc == IPR_RC_JOB_CONTINUE);
8991 }
8992
8993 /**
8994  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8995  * @ioa_cfg:            ioa config struct
8996  * @job_step:           first job step of reset job
8997  * @shutdown_type:      shutdown type
8998  *
8999  * Description: This function will initiate the reset of the given adapter
9000  * starting at the selected job step.
9001  * If the caller needs to wait on the completion of the reset,
9002  * the caller must sleep on the reset_wait_q.
9003  *
9004  * Return value:
9005  *      none
9006  **/
9007 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9008                                     int (*job_step) (struct ipr_cmnd *),
9009                                     enum ipr_shutdown_type shutdown_type)
9010 {
9011         struct ipr_cmnd *ipr_cmd;
9012         int i;
9013
9014         ioa_cfg->in_reset_reload = 1;
9015         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9016                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9017                 ioa_cfg->hrrq[i].allow_cmds = 0;
9018                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9019         }
9020         wmb();
9021         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9022                 scsi_block_requests(ioa_cfg->host);
9023
9024         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9025         ioa_cfg->reset_cmd = ipr_cmd;
9026         ipr_cmd->job_step = job_step;
9027         ipr_cmd->u.shutdown_type = shutdown_type;
9028
9029         ipr_reset_ioa_job(ipr_cmd);
9030 }
9031
9032 /**
9033  * ipr_initiate_ioa_reset - Initiate an adapter reset
9034  * @ioa_cfg:            ioa config struct
9035  * @shutdown_type:      shutdown type
9036  *
9037  * Description: This function will initiate the reset of the given adapter.
9038  * If the caller needs to wait on the completion of the reset,
9039  * the caller must sleep on the reset_wait_q.
9040  *
9041  * Return value:
9042  *      none
9043  **/
9044 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9045                                    enum ipr_shutdown_type shutdown_type)
9046 {
9047         int i;
9048
9049         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9050                 return;
9051
9052         if (ioa_cfg->in_reset_reload) {
9053                 if (ioa_cfg->sdt_state == GET_DUMP)
9054                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9055                 else if (ioa_cfg->sdt_state == READ_DUMP)
9056                         ioa_cfg->sdt_state = ABORT_DUMP;
9057         }
9058
9059         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9060                 dev_err(&ioa_cfg->pdev->dev,
9061                         "IOA taken offline - error recovery failed\n");
9062
9063                 ioa_cfg->reset_retries = 0;
9064                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9065                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9066                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9067                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9068                 }
9069                 wmb();
9070
9071                 if (ioa_cfg->in_ioa_bringdown) {
9072                         ioa_cfg->reset_cmd = NULL;
9073                         ioa_cfg->in_reset_reload = 0;
9074                         ipr_fail_all_ops(ioa_cfg);
9075                         wake_up_all(&ioa_cfg->reset_wait_q);
9076
9077                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9078                                 spin_unlock_irq(ioa_cfg->host->host_lock);
9079                                 scsi_unblock_requests(ioa_cfg->host);
9080                                 spin_lock_irq(ioa_cfg->host->host_lock);
9081                         }
9082                         return;
9083                 } else {
9084                         ioa_cfg->in_ioa_bringdown = 1;
9085                         shutdown_type = IPR_SHUTDOWN_NONE;
9086                 }
9087         }
9088
9089         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9090                                 shutdown_type);
9091 }
9092
9093 /**
9094  * ipr_reset_freeze - Hold off all I/O activity
9095  * @ipr_cmd:    ipr command struct
9096  *
9097  * Description: If the PCI slot is frozen, hold off all I/O
9098  * activity; then, as soon as the slot is available again,
9099  * initiate an adapter reset.
9100  *
 * Return value:
 *      IPR_RC_JOB_RETURN
 **/
9101 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9102 {
9103         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9104         int i;
9105
9106         /* Disallow new interrupts so we do not loop in the ISR while the slot is frozen */
9107         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9108                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9109                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9110                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9111         }
9112         wmb();
9113         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9114         ipr_cmd->done = ipr_reset_ioa_job;
9115         return IPR_RC_JOB_RETURN;
9116 }
9117
9118 /**
9119  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9120  * @pdev:       PCI device struct
9121  *
9122  * Description: This routine is called to tell us that the MMIO
9123  * access to the IOA has been restored.
9124  *
 * Return value:
 *      PCI_ERS_RESULT_NEED_RESET
 **/
9125 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9126 {
9127         unsigned long flags = 0;
9128         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9129
9130         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9131         if (!ioa_cfg->probe_done)
9132                 pci_save_state(pdev);
9133         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9134         return PCI_ERS_RESULT_NEED_RESET;
9135 }
9136
9137 /**
9138  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9139  * @pdev:       PCI device struct
9140  *
9141  * Description: This routine is called to tell us that the PCI bus
9142  * is down. Can't do anything here, except put the device driver
9143  * into a holding pattern, waiting for the PCI bus to come back.
9144  */
9145 static void ipr_pci_frozen(struct pci_dev *pdev)
9146 {
9147         unsigned long flags = 0;
9148         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9149
9150         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9151         if (ioa_cfg->probe_done)
9152                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9153         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9154 }
9155
9156 /**
9157  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9158  * @pdev:       PCI device struct
9159  *
9160  * Description: This routine is called by the pci error recovery
9161  * code after the PCI slot has been reset, just before we
9162  * should resume normal operations.
9163  *
 * Return value:
 *      PCI_ERS_RESULT_RECOVERED
 **/
9164 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9165 {
9166         unsigned long flags = 0;
9167         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9168
9169         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9170         if (ioa_cfg->probe_done) {
9171                 if (ioa_cfg->needs_warm_reset)
9172                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9173                 else
9174                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9175                                                 IPR_SHUTDOWN_NONE);
9176         } else
9177                 wake_up_all(&ioa_cfg->eeh_wait_q);
9178         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9179         return PCI_ERS_RESULT_RECOVERED;
9180 }
9181
9182 /**
9183  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9184  * @pdev:       PCI device struct
9185  *
9186  * Description: This routine is called when the PCI bus has
9187  * permanently failed.
9188  */
9189 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9190 {
9191         unsigned long flags = 0;
9192         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9193         int i;
9194
9195         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9196         if (ioa_cfg->probe_done) {
9197                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9198                         ioa_cfg->sdt_state = ABORT_DUMP;
9199                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9200                 ioa_cfg->in_ioa_bringdown = 1;
9201                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9202                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9203                         ioa_cfg->hrrq[i].allow_cmds = 0;
9204                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9205                 }
9206                 wmb();
9207                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9208         } else
9209                 wake_up_all(&ioa_cfg->eeh_wait_q);
9210         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9211 }
9212
9213 /**
9214  * ipr_pci_error_detected - Called when a PCI error is detected.
9215  * @pdev:       PCI device struct
9216  * @state:      PCI channel state
9217  *
9218  * Description: Called when a PCI error is detected.
9219  *
9220  * Return value:
9221  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9222  */
9223 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9224                                                pci_channel_state_t state)
9225 {
9226         switch (state) {
9227         case pci_channel_io_frozen:
9228                 ipr_pci_frozen(pdev);
9229                 return PCI_ERS_RESULT_CAN_RECOVER;
9230         case pci_channel_io_perm_failure:
9231                 ipr_pci_perm_failure(pdev);
9232                 return PCI_ERS_RESULT_DISCONNECT;
9234         default:
9235                 break;
9236         }
9237         return PCI_ERS_RESULT_NEED_RESET;
9238 }
9239
9240 /**
9241  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9242  * @ioa_cfg:    ioa cfg struct
9243  *
9244  * Description: This is the second phase of adapter initialization.
9245  * This function takes care of initializing the adapter to the point
9246  * where it can accept new commands.
9247  *
9248  * Return value:
9249  *      0 on success / -EIO on failure
9250  **/
9251 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9252 {
9253         int rc = 0;
9254         unsigned long host_lock_flags = 0;
9255
9256         ENTER;
9257         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9258         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9259         ioa_cfg->probe_done = 1;
9260         if (ioa_cfg->needs_hard_reset) {
9261                 ioa_cfg->needs_hard_reset = 0;
9262                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9263         } else
9264                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9265                                         IPR_SHUTDOWN_NONE);
9266         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9267
9268         LEAVE;
9269         return rc;
9270 }
9271
9272 /**
9273  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9274  * @ioa_cfg:    ioa config struct
9275  *
9276  * Return value:
9277  *      none
9278  **/
9279 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9280 {
9281         int i;
9282
9283         if (ioa_cfg->ipr_cmnd_list) {
9284                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9285                         if (ioa_cfg->ipr_cmnd_list[i])
9286                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9287                                               ioa_cfg->ipr_cmnd_list[i],
9288                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9289
9290                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9291                 }
9292         }
9293
9294         /* dma_pool_destroy() is a no-op when passed NULL */
9295         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9296
9297         kfree(ioa_cfg->ipr_cmnd_list);
9298         kfree(ioa_cfg->ipr_cmnd_list_dma);
9299         ioa_cfg->ipr_cmnd_list = NULL;
9300         ioa_cfg->ipr_cmnd_list_dma = NULL;
9301         ioa_cfg->ipr_cmd_pool = NULL;
9302 }
9303
9304 /**
9305  * ipr_free_mem - Frees memory allocated for an adapter
9306  * @ioa_cfg:    ioa cfg struct
9307  *
9308  * Return value:
9309  *      none
9310  **/
9311 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9312 {
9313         int i;
9314
9315         kfree(ioa_cfg->res_entries);
9316         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9317                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9318         ipr_free_cmd_blks(ioa_cfg);
9319
9320         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9321                 dma_free_coherent(&ioa_cfg->pdev->dev,
9322                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9323                                   ioa_cfg->hrrq[i].host_rrq,
9324                                   ioa_cfg->hrrq[i].host_rrq_dma);
9325
9326         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9327                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9328
9329         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9330                 dma_free_coherent(&ioa_cfg->pdev->dev,
9331                                   sizeof(struct ipr_hostrcb),
9332                                   ioa_cfg->hostrcb[i],
9333                                   ioa_cfg->hostrcb_dma[i]);
9334         }
9335
9336         ipr_free_dump(ioa_cfg);
9337         kfree(ioa_cfg->trace);
9338 }
9339
9340 /**
9341  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9342  * @ioa_cfg:    ioa config struct
9343  *
9344  * This function frees all allocated IRQs for the
9345  * specified adapter.
9346  *
9347  * Return value:
9348  *      none
9349  **/
9350 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9351 {
9352         struct pci_dev *pdev = ioa_cfg->pdev;
9353
9354         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9355             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9356                 int i;
9357                 for (i = 0; i < ioa_cfg->nvectors; i++)
9358                         free_irq(ioa_cfg->vectors_info[i].vec,
9359                                  &ioa_cfg->hrrq[i]);
9360         } else
9361                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9362
9363         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9364                 pci_disable_msi(pdev);
9365                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9366         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9367                 pci_disable_msix(pdev);
9368                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9369         }
9370 }
9371
9372 /**
9373  * ipr_free_all_resources - Free all allocated resources for an adapter.
9374  * @ioa_cfg:    ioa config struct
9375  *
9376  * This function frees all allocated resources for the
9377  * specified adapter.
9378  *
9379  * Return value:
9380  *      none
9381  **/
9382 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9383 {
9384         struct pci_dev *pdev = ioa_cfg->pdev;
9385
9386         ENTER;
9387         ipr_free_irqs(ioa_cfg);
9388         if (ioa_cfg->reset_work_q)
9389                 destroy_workqueue(ioa_cfg->reset_work_q);
9390         iounmap(ioa_cfg->hdw_dma_regs);
9391         pci_release_regions(pdev);
9392         ipr_free_mem(ioa_cfg);
9393         scsi_host_put(ioa_cfg->host);
9394         pci_disable_device(pdev);
9395         LEAVE;
9396 }
9397
9398 /**
9399  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9400  * @ioa_cfg:    ioa config struct
9401  *
9402  * Return value:
9403  *      0 on success / -ENOMEM on allocation failure
9404  **/
9405 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9406 {
9407         struct ipr_cmnd *ipr_cmd;
9408         struct ipr_ioarcb *ioarcb;
9409         dma_addr_t dma_addr;
9410         int i, entries_each_hrrq, hrrq_id = 0;
9411
9412         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9413                                                 sizeof(struct ipr_cmnd), 512, 0);
9414
9415         if (!ioa_cfg->ipr_cmd_pool)
9416                 return -ENOMEM;
9417
9418         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9419         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9420
9421         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9422                 ipr_free_cmd_blks(ioa_cfg);
9423                 return -ENOMEM;
9424         }
9425
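        /*
         * Partition the command blocks among the HRRQs. With multiple
         * queues, the first queue is reserved for internal commands and
         * the remaining blocks are split evenly across the other queues.
         */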
9426         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9427                 if (ioa_cfg->hrrq_num > 1) {
9428                         if (i == 0) {
9429                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9430                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9431                                 ioa_cfg->hrrq[i].max_cmd_id =
9432                                         (entries_each_hrrq - 1);
9433                         } else {
9434                                 entries_each_hrrq =
9435                                         IPR_NUM_BASE_CMD_BLKS/
9436                                         (ioa_cfg->hrrq_num - 1);
9437                                 ioa_cfg->hrrq[i].min_cmd_id =
9438                                         IPR_NUM_INTERNAL_CMD_BLKS +
9439                                         (i - 1) * entries_each_hrrq;
9440                                 ioa_cfg->hrrq[i].max_cmd_id =
9441                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9442                                         i * entries_each_hrrq - 1);
9443                         }
9444                 } else {
9445                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9446                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9447                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9448                 }
9449                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9450         }
9451
9452         BUG_ON(ioa_cfg->hrrq_num == 0);
9453
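        /* Hand any blocks left over by the integer division to the last HRRQ */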
9454         i = IPR_NUM_CMD_BLKS -
9455                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9456         if (i > 0) {
9457                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9458                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9459         }
9460
9461         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9462                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9463
9464                 if (!ipr_cmd) {
9465                         ipr_free_cmd_blks(ioa_cfg);
9466                         return -ENOMEM;
9467                 }
9468
9469                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9470                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9471                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9472
9473                 ioarcb = &ipr_cmd->ioarcb;
9474                 ipr_cmd->dma_addr = dma_addr;
9475                 if (ioa_cfg->sis64)
9476                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9477                 else
9478                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9479
9480                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9481                 if (ioa_cfg->sis64) {
9482                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9483                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9484                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9485                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9486                 } else {
9487                         ioarcb->write_ioadl_addr =
9488                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9489                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9490                         ioarcb->ioasa_host_pci_addr =
9491                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9492                 }
9493                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9494                 ipr_cmd->cmd_index = i;
9495                 ipr_cmd->ioa_cfg = ioa_cfg;
9496                 ipr_cmd->sense_buffer_dma = dma_addr +
9497                         offsetof(struct ipr_cmnd, sense_buffer);
9498
9499                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9500                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9501                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9502                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9503                         hrrq_id++;
9504         }
9505
9506         return 0;
9507 }
9508
9509 /**
9510  * ipr_alloc_mem - Allocate memory for an adapter
9511  * @ioa_cfg:    ioa config struct
9512  *
9513  * Return value:
9514  *      0 on success / non-zero for error
9515  **/
9516 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9517 {
9518         struct pci_dev *pdev = ioa_cfg->pdev;
9519         int i, rc = -ENOMEM;
9520
9521         ENTER;
9522         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9523                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9524
9525         if (!ioa_cfg->res_entries)
9526                 goto out;
9527
9528         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9529                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9530                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9531         }
9532
9533         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9534                                               sizeof(struct ipr_misc_cbs),
9535                                               &ioa_cfg->vpd_cbs_dma,
9536                                               GFP_KERNEL);
9537
9538         if (!ioa_cfg->vpd_cbs)
9539                 goto out_free_res_entries;
9540
9541         if (ipr_alloc_cmd_blks(ioa_cfg))
9542                 goto out_free_vpd_cbs;
9543
9544         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9545                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9546                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9547                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9548                                         GFP_KERNEL);
9549
9550                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9551                         while (--i >= 0)
9552                                 dma_free_coherent(&pdev->dev,
9553                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9554                                         ioa_cfg->hrrq[i].host_rrq,
9555                                         ioa_cfg->hrrq[i].host_rrq_dma);
9556                         goto out_ipr_free_cmd_blocks;
9557                 }
9558                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9559         }
9560
9561         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9562                                                   ioa_cfg->cfg_table_size,
9563                                                   &ioa_cfg->cfg_table_dma,
9564                                                   GFP_KERNEL);
9565
9566         if (!ioa_cfg->u.cfg_table)
9567                 goto out_free_host_rrq;
9568
9569         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9570                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9571                                                          sizeof(struct ipr_hostrcb),
9572                                                          &ioa_cfg->hostrcb_dma[i],
9573                                                          GFP_KERNEL);
9574
9575                 if (!ioa_cfg->hostrcb[i])
9576                         goto out_free_hostrcb_dma;
9577
9578                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9579                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9580                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9581                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9582         }
9583
9584         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9585                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9586
9587         if (!ioa_cfg->trace)
9588                 goto out_free_hostrcb_dma;
9589
9590         rc = 0;
9591 out:
9592         LEAVE;
9593         return rc;
9594
9595 out_free_hostrcb_dma:
9596         while (i-- > 0) {
9597                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9598                                   ioa_cfg->hostrcb[i],
9599                                   ioa_cfg->hostrcb_dma[i]);
9600         }
9601         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9602                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9603 out_free_host_rrq:
9604         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9605                 dma_free_coherent(&pdev->dev,
9606                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9607                                   ioa_cfg->hrrq[i].host_rrq,
9608                                   ioa_cfg->hrrq[i].host_rrq_dma);
9609         }
9610 out_ipr_free_cmd_blocks:
9611         ipr_free_cmd_blks(ioa_cfg);
9612 out_free_vpd_cbs:
9613         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9614                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9615 out_free_res_entries:
9616         kfree(ioa_cfg->res_entries);
9617         goto out;
9618 }
9619
9620 /**
9621  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9622  * @ioa_cfg:    ioa config struct
9623  *
9624  * Return value:
9625  *      none
9626  **/
9627 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9628 {
9629         int i;
9630
9631         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9632                 ioa_cfg->bus_attr[i].bus = i;
9633                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9634                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9635                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9636                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9637                 else
9638                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9639         }
9640 }
9641
9642 /**
9643  * ipr_init_regs - Initialize IOA registers
9644  * @ioa_cfg:    ioa config struct
9645  *
9646  * Return value:
9647  *      none
9648  **/
9649 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9650 {
9651         const struct ipr_interrupt_offsets *p;
9652         struct ipr_interrupts *t;
9653         void __iomem *base;
9654
9655         p = &ioa_cfg->chip_cfg->regs;
9656         t = &ioa_cfg->regs;
9657         base = ioa_cfg->hdw_dma_regs;
9658
9659         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9660         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9661         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9662         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9663         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9664         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9665         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9666         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9667         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9668         t->ioarrin_reg = base + p->ioarrin_reg;
9669         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9670         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9671         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9672         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9673         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9674         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9675
9676         if (ioa_cfg->sis64) {
9677                 t->init_feedback_reg = base + p->init_feedback_reg;
9678                 t->dump_addr_reg = base + p->dump_addr_reg;
9679                 t->dump_data_reg = base + p->dump_data_reg;
9680                 t->endian_swap_reg = base + p->endian_swap_reg;
9681         }
9682 }
9683
9684 /**
9685  * ipr_init_ioa_cfg - Initialize IOA config struct
9686  * @ioa_cfg:    ioa config struct
9687  * @host:               scsi host struct
9688  * @pdev:               PCI dev struct
9689  *
9690  * Return value:
9691  *      none
9692  **/
9693 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9694                              struct Scsi_Host *host, struct pci_dev *pdev)
9695 {
9696         int i;
9697
9698         ioa_cfg->host = host;
9699         ioa_cfg->pdev = pdev;
9700         ioa_cfg->log_level = ipr_log_level;
9701         ioa_cfg->doorbell = IPR_DOORBELL;
9702         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9703         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9704         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9705         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9706         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9707         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9708
9709         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9710         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9711         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9712         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9713         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9714         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9715         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9716         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9717         ioa_cfg->sdt_state = INACTIVE;
9718
9719         ipr_initialize_bus_attr(ioa_cfg);
9720         ioa_cfg->max_devs_supported = ipr_max_devs;
9721
9722         if (ioa_cfg->sis64) {
9723                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9724                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9725                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9726                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9727                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9728                                            + ((sizeof(struct ipr_config_table_entry64)
9729                                                * ioa_cfg->max_devs_supported)));
9730         } else {
9731                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9732                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9733                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9734                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9735                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9736                                            + ((sizeof(struct ipr_config_table_entry)
9737                                                * ioa_cfg->max_devs_supported)));
9738         }
9739
9740         host->max_channel = IPR_VSET_BUS;
9741         host->unique_id = host->host_no;
9742         host->max_cmd_len = IPR_MAX_CDB_LEN;
9743         host->can_queue = ioa_cfg->max_cmds;
9744         pci_set_drvdata(pdev, ioa_cfg);
9745
9746         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9747                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9748                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9749                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9750                 if (i == 0)
9751                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9752                 else
9753                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9754         }
9755 }
9756
9757 /**
9758  * ipr_get_chip_info - Find adapter chip information
9759  * @dev_id:             PCI device id struct
9760  *
9761  * Return value:
9762  *      ptr to chip information on success / NULL on failure
9763  **/
9764 static const struct ipr_chip_t *
9765 ipr_get_chip_info(const struct pci_device_id *dev_id)
9766 {
9767         int i;
9768
9769         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9770                 if (ipr_chip[i].vendor == dev_id->vendor &&
9771                     ipr_chip[i].device == dev_id->device)
9772                         return &ipr_chip[i];
9773         return NULL;
9774 }
9775
9776 /**
9777  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9778  *                                              during probe time
9779  * @ioa_cfg:    ioa config struct
9780  *
9781  * Return value:
9782  *      None
9783  **/
9784 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9785 {
9786         struct pci_dev *pdev = ioa_cfg->pdev;
9787
9788         if (pci_channel_offline(pdev)) {
9789                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9790                                    !pci_channel_offline(pdev),
9791                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9792                 pci_restore_state(pdev);
9793         }
9794 }
9795
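/**
 * ipr_enable_msix - Enable MSI-X interrupts
 * @ioa_cfg:    ioa config struct
 *
 * Description: Allocate between one and ipr_number_of_msix MSI-X
 * vectors and record the assigned vector numbers.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/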
9796 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9797 {
9798         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9799         int i, vectors;
9800
9801         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9802                 entries[i].entry = i;
9803
9804         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9805                                         entries, 1, ipr_number_of_msix);
9806         if (vectors < 0) {
9807                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9808                 return vectors;
9809         }
9810
9811         for (i = 0; i < vectors; i++)
9812                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9813         ioa_cfg->nvectors = vectors;
9814
9815         return 0;
9816 }
9817
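/**
 * ipr_enable_msi - Enable MSI interrupts
 * @ioa_cfg:    ioa config struct
 *
 * Description: Allocate between one and ipr_number_of_msix MSI
 * vectors and record the assigned vector numbers.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/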
9818 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9819 {
9820         int i, vectors;
9821
9822         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9823         if (vectors < 0) {
9824                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9825                 return vectors;
9826         }
9827
9828         for (i = 0; i < vectors; i++)
9829                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9830         ioa_cfg->nvectors = vectors;
9831
9832         return 0;
9833 }
9834
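/**
 * name_msi_vectors - Build the per-vector interrupt names
 * @ioa_cfg:    ioa config struct
 *
 * Description: Name each vector "host<host_no>-<vector index>" for
 * display in /proc/interrupts.
 *
 * Return value:
 *      none
 **/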
9835 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9836 {
9837         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9838
9839         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9840                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9841                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9842                 ioa_cfg->vectors_info[vec_idx].
9843                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9844         }
9845 }
9846
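/**
 * ipr_request_other_msi_irqs - Request IRQs for the secondary HRRQs
 * @ioa_cfg:    ioa config struct
 *
 * Description: Request an IRQ for every vector beyond the first,
 * unwinding any already-requested vectors on failure.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/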
9847 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9848 {
9849         int i, rc;
9850
9851         for (i = 1; i < ioa_cfg->nvectors; i++) {
9852                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9853                         ipr_isr_mhrrq,
9854                         0,
9855                         ioa_cfg->vectors_info[i].desc,
9856                         &ioa_cfg->hrrq[i]);
9857                 if (rc) {
9858                         while (--i >= 0)
9859                                 free_irq(ioa_cfg->vectors_info[i].vec,
9860                                         &ioa_cfg->hrrq[i]);
9861                         return rc;
9862                 }
9863         }
9864         return 0;
9865 }
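/*
 * The loop above uses the usual partial-unwind idiom: on a failure at
 * vector i it frees indices i-1 down to 0, which also releases vector
 * 0 requested earlier by the caller (ipr_probe_ioa). A generic sketch
 * of the pattern, guarded out, with a caller-supplied handler:
 */
#if 0
static int example_request_all(int nvec, unsigned int *vecs,
			       irq_handler_t handler, void *dev)
{
	int i, rc;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(vecs[i], handler, 0, "example", dev);
		if (rc) {
			while (--i >= 0)	/* unwind in reverse */
				free_irq(vecs[i], dev);
			return rc;
		}
	}
	return 0;
}
#endif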
9866
9867 /**
9868  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9869  * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
9870  *
9871  * Description: Simply sets the msi_received flag to 1, indicating that
9872  * Message Signaled Interrupts are supported.
9873  *
9874  * Return value:
9875  *      IRQ_HANDLED
9876  **/
9877 static irqreturn_t ipr_test_intr(int irq, void *devp)
9878 {
9879         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9880         unsigned long lock_flags = 0;
9881         irqreturn_t rc = IRQ_HANDLED;
9882
9883         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9884         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9885
9886         ioa_cfg->msi_received = 1;
9887         wake_up(&ioa_cfg->msi_wait_q);
9888
9889         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9890         return rc;
9891 }
9892
9893 /**
9894  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9895  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
9896  *
9897  * Description: The return value from pci_enable_msi_range() cannot always be
9898  * trusted.  This routine sets up and initiates a test interrupt to determine
9899  * if the interrupt is received via the ipr_test_intr() service routine.
9900  * If the test fails, the driver will fall back to LSI.
9901  *
9902  * Return value:
9903  *      0 on success / non-zero on failure
9904  **/
9905 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9906 {
9907         int rc;
9908         volatile u32 int_reg;
9909         unsigned long lock_flags = 0;
9910
9911         ENTER;
9912
9913         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9914         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9915         ioa_cfg->msi_received = 0;
9916         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9917         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9918         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9919         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9920
9921         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9922                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9923         else
9924                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9925         if (rc) {
9926                 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
9927                 return rc;
9928         } else if (ipr_debug)
9929                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9930
9931         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9932         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9933         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9934         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9935         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9936
9937         if (!ioa_cfg->msi_received) {
9938                 /* MSI test failed */
9939                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9940                 rc = -EOPNOTSUPP;
9941         } else if (ipr_debug)
9942                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9943
9944         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9945
9946         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9947                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9948         else
9949                 free_irq(pdev->irq, ioa_cfg);
9950
9951         LEAVE;
9952
9953         return rc;
9954 }
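/*
 * Shape of the self-test above, reduced to its producer/consumer core
 * (guarded out, illustrative only): mask all interrupts, install a
 * throwaway handler, poke the IOA's debug-acknowledge bit so the
 * hardware raises exactly one interrupt, then wait up to one second
 * (HZ jiffies) for ipr_test_intr() to flip msi_received.
 */
#if 0
static int example_irq_selftest(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->msi_received = 0;
	/* ...trigger a single test interrupt in hardware here... */
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	return ioa_cfg->msi_received ? 0 : -EOPNOTSUPP;
}
#endif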
9955
9956 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9957  * @pdev:               PCI device struct
9958  * @dev_id:             PCI device id struct
9959  *
9960  * Return value:
9961  *      0 on success / non-zero on failure
9962  **/
9963 static int ipr_probe_ioa(struct pci_dev *pdev,
9964                          const struct pci_device_id *dev_id)
9965 {
9966         struct ipr_ioa_cfg *ioa_cfg;
9967         struct Scsi_Host *host;
9968         unsigned long ipr_regs_pci;
9969         void __iomem *ipr_regs;
9970         int rc = PCIBIOS_SUCCESSFUL;
9971         volatile u32 mask, uproc, interrupts;
9972         unsigned long lock_flags, driver_lock_flags;
9973
9974         ENTER;
9975
9976         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9977         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9978
9979         if (!host) {
9980                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9981                 rc = -ENOMEM;
9982                 goto out;
9983         }
9984
9985         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9986         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9987         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9988
9989         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9990
9991         if (!ioa_cfg->ipr_chip) {
9992                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9993                         dev_id->vendor, dev_id->device);
9994                 goto out_scsi_host_put;
9995         }
9996
9997         /* set SIS 32 or SIS 64 */
9998         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9999         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10000         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10001         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10002
10003         if (ipr_transop_timeout)
10004                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10005         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10006                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10007         else
10008                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10009
10010         ioa_cfg->revid = pdev->revision;
10011
10012         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10013
10014         ipr_regs_pci = pci_resource_start(pdev, 0);
10015
10016         rc = pci_request_regions(pdev, IPR_NAME);
10017         if (rc < 0) {
10018                 dev_err(&pdev->dev,
10019                         "Couldn't register memory range of registers\n");
10020                 goto out_scsi_host_put;
10021         }
10022
10023         rc = pci_enable_device(pdev);
10024
10025         if (rc || pci_channel_offline(pdev)) {
10026                 if (pci_channel_offline(pdev)) {
10027                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10028                         rc = pci_enable_device(pdev);
10029                 }
10030
10031                 if (rc) {
10032                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10033                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10034                         goto out_release_regions;
10035                 }
10036         }
10037
10038         ipr_regs = pci_ioremap_bar(pdev, 0);
10039
10040         if (!ipr_regs) {
10041                 dev_err(&pdev->dev,
10042                         "Couldn't map memory range of registers\n");
10043                 rc = -ENOMEM;
10044                 goto out_disable;
10045         }
10046
10047         ioa_cfg->hdw_dma_regs = ipr_regs;
10048         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10049         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10050
10051         ipr_init_regs(ioa_cfg);
10052
10053         if (ioa_cfg->sis64) {
10054                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10055                 if (rc < 0) {
10056                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10057                         rc = dma_set_mask_and_coherent(&pdev->dev,
10058                                                        DMA_BIT_MASK(32));
10059                 }
10060         } else {
10061                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        }
10062
10063         if (rc < 0) {
10064                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10065                 goto cleanup_nomem;
10066         }
10067
10068         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10069                                    ioa_cfg->chip_cfg->cache_line_size);
10070
10071         if (rc != PCIBIOS_SUCCESSFUL) {
10072                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10073                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10074                 rc = -EIO;
10075                 goto cleanup_nomem;
10076         }
10077
10078         /* Issue MMIO read to ensure card is not in EEH */
10079         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10080         ipr_wait_for_pci_err_recovery(ioa_cfg);
10081
10082         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10083                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10084                         IPR_MAX_MSIX_VECTORS);
10085                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10086         }
10087
10088         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10089                         ipr_enable_msix(ioa_cfg) == 0)
10090                 ioa_cfg->intr_flag = IPR_USE_MSIX;
10091         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10092                         ipr_enable_msi(ioa_cfg) == 0)
10093                 ioa_cfg->intr_flag = IPR_USE_MSI;
10094         else {
10095                 ioa_cfg->intr_flag = IPR_USE_LSI;
10096                 ioa_cfg->clear_isr = 1;
10097                 ioa_cfg->nvectors = 1;
10098                 dev_info(&pdev->dev, "Cannot enable MSI.\n");
10099         }
10100
10101         pci_set_master(pdev);
10102
10103         if (pci_channel_offline(pdev)) {
10104                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10105                 pci_set_master(pdev);
10106                 if (pci_channel_offline(pdev)) {
10107                         rc = -EIO;
10108                         goto out_msi_disable;
10109                 }
10110         }
10111
10112         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10113             ioa_cfg->intr_flag == IPR_USE_MSIX) {
10114                 rc = ipr_test_msi(ioa_cfg, pdev);
10115                 if (rc == -EOPNOTSUPP) {
10116                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10117                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
10118                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10119                                 pci_disable_msi(pdev);
10120                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10121                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10122                                 pci_disable_msix(pdev);
10123                         }
10124
10125                         ioa_cfg->intr_flag = IPR_USE_LSI;
10126                         ioa_cfg->nvectors = 1;
10127                 } else if (rc) {
10128                         goto out_msi_disable;
10129                 } else {
10131                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
10132                                 dev_info(&pdev->dev,
10133                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
10134                                         ioa_cfg->nvectors, pdev->irq);
10135                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10136                                 dev_info(&pdev->dev,
10137                                         "Request for %d MSIXs succeeded.\n",
10138                                         ioa_cfg->nvectors);
10139                 }
10140         }
10141
10142         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10143                                 (unsigned int)num_online_cpus(),
10144                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10145
10146         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10147                 goto out_msi_disable;
10148
10149         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10150                 goto out_msi_disable;
10151
10152         rc = ipr_alloc_mem(ioa_cfg);
10153         if (rc < 0) {
10154                 dev_err(&pdev->dev,
10155                         "Couldn't allocate enough memory for device driver!\n");
10156                 goto out_msi_disable;
10157         }
10158
10159         /* Save away PCI config space for use following IOA reset */
10160         rc = pci_save_state(pdev);
10161
10162         if (rc != PCIBIOS_SUCCESSFUL) {
10163                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10164                 rc = -EIO;
10165                 goto cleanup_nolog;
10166         }
10167
10168         /*
10169          * If HRRQ updated interrupt is not masked, or reset alert is set,
10170          * the card is in an unknown state and needs a hard reset
10171          */
10172         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10173         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10174         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10175         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10176                 ioa_cfg->needs_hard_reset = 1;
10177         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10178                 ioa_cfg->needs_hard_reset = 1;
10179         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10180                 ioa_cfg->ioa_unit_checked = 1;
10181
10182         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10183         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10184         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10185
10186         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10187             ioa_cfg->intr_flag == IPR_USE_MSIX) {
10188                 name_msi_vectors(ioa_cfg);
10189                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10190                         0,
10191                         ioa_cfg->vectors_info[0].desc,
10192                         &ioa_cfg->hrrq[0]);
10193                 if (!rc)
10194                         rc = ipr_request_other_msi_irqs(ioa_cfg);
10195         } else {
10196                 rc = request_irq(pdev->irq, ipr_isr,
10197                          IRQF_SHARED,
10198                          IPR_NAME, &ioa_cfg->hrrq[0]);
10199         }
10200         if (rc) {
10201                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10202                         pdev->irq, rc);
10203                 goto cleanup_nolog;
10204         }
10205
10206         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10207             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10208                 ioa_cfg->needs_warm_reset = 1;
10209                 ioa_cfg->reset = ipr_reset_slot_reset;
10210
10211                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10212                                                                 WQ_MEM_RECLAIM, host->host_no);
10213
10214                 if (!ioa_cfg->reset_work_q) {
10215                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10216                         goto out_free_irq;
10217                 }
10218         } else {
10219                 ioa_cfg->reset = ipr_reset_start_bist;
        }
10220
10221         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10222         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10223         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10224
10225         LEAVE;
10226 out:
10227         return rc;
10228
10229 out_free_irq:
10230         ipr_free_irqs(ioa_cfg);
10231 cleanup_nolog:
10232         ipr_free_mem(ioa_cfg);
10233 out_msi_disable:
10234         ipr_wait_for_pci_err_recovery(ioa_cfg);
10235         if (ioa_cfg->intr_flag == IPR_USE_MSI)
10236                 pci_disable_msi(pdev);
10237         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10238                 pci_disable_msix(pdev);
10239 cleanup_nomem:
10240         iounmap(ipr_regs);
10241 out_disable:
10242         pci_disable_device(pdev);
10243 out_release_regions:
10244         pci_release_regions(pdev);
10245 out_scsi_host_put:
10246         scsi_host_put(host);
10247         goto out;
10248 }
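/*
 * The error path above is the kernel's centralized-exit idiom: cleanup
 * labels appear in the reverse order of acquisition, so a goto from any
 * failure point releases exactly what has been set up so far. In
 * miniature (guarded out, names hypothetical):
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_request_regions(pdev, "example");
	if (rc)
		goto out;
	rc = pci_enable_device(pdev);
	if (rc)
		goto out_release;
	return 0;

out_release:
	pci_release_regions(pdev);
out:
	return rc;
}
#endif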
10249
10250 /**
10251  * ipr_initiate_ioa_bringdown - Bring down an adapter
10252  * @ioa_cfg:            ioa config struct
10253  * @shutdown_type:      shutdown type
10254  *
10255  * Description: This function will initiate bringing down the adapter.
10256  * This consists of issuing an IOA shutdown to the adapter
10257  * to flush the cache, and running BIST.
10258  * If the caller needs to wait on the completion of the reset,
10259  * the caller must sleep on the reset_wait_q.
10260  *
10261  * Return value:
10262  *      none
10263  **/
10264 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10265                                        enum ipr_shutdown_type shutdown_type)
10266 {
10267         ENTER;
10268         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10269                 ioa_cfg->sdt_state = ABORT_DUMP;
10270         ioa_cfg->reset_retries = 0;
10271         ioa_cfg->in_ioa_bringdown = 1;
10272         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10273         LEAVE;
10274 }
10275
10276 /**
10277  * __ipr_remove - Remove a single adapter
10278  * @pdev:       pci device struct
10279  *
10280  * Adapter hot plug remove entry point.
10281  *
10282  * Return value:
10283  *      none
10284  **/
10285 static void __ipr_remove(struct pci_dev *pdev)
10286 {
10287         unsigned long host_lock_flags = 0;
10288         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10289         int i;
10290         unsigned long driver_lock_flags;
10291         ENTER;
10292
10293         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10294         while (ioa_cfg->in_reset_reload) {
10295                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10296                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10297                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10298         }
10299
10300         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10301                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10302                 ioa_cfg->hrrq[i].removing_ioa = 1;
10303                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10304         }
10305         wmb();
10306         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10307
10308         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10309         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10310         flush_work(&ioa_cfg->work_q);
10311         if (ioa_cfg->reset_work_q)
10312                 flush_workqueue(ioa_cfg->reset_work_q);
10313         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10314         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10315
10316         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10317         list_del(&ioa_cfg->queue);
10318         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10319
10320         if (ioa_cfg->sdt_state == ABORT_DUMP)
10321                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10322         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10323
10324         ipr_free_all_resources(ioa_cfg);
10325
10326         LEAVE;
10327 }
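/*
 * Ordering note for the teardown above: each HRR queue's removing_ioa
 * flag is set under its per-queue _lock, and the wmb() guarantees the
 * flags are visible to the interrupt path before the bringdown reset
 * is initiated.
 */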
10328
10329 /**
10330  * ipr_remove - IOA hot plug remove entry point
10331  * @pdev:       pci device struct
10332  *
10333  * Adapter hot plug remove entry point.
10334  *
10335  * Return value:
10336  *      none
10337  **/
10338 static void ipr_remove(struct pci_dev *pdev)
10339 {
10340         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10341
10342         ENTER;
10343
10344         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10345                               &ipr_trace_attr);
10346         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10347                              &ipr_dump_attr);
10348         scsi_remove_host(ioa_cfg->host);
10349
10350         __ipr_remove(pdev);
10351
10352         LEAVE;
10353 }
10354
10355 /**
10356  * ipr_probe - Adapter hot plug add entry point
 * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
10357  *
10358  * Return value:
10359  *      0 on success / non-zero on failure
10360  **/
10361 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10362 {
10363         struct ipr_ioa_cfg *ioa_cfg;
10364         int rc, i;
10365
10366         rc = ipr_probe_ioa(pdev, dev_id);
10367
10368         if (rc)
10369                 return rc;
10370
10371         ioa_cfg = pci_get_drvdata(pdev);
10372         rc = ipr_probe_ioa_part2(ioa_cfg);
10373
10374         if (rc) {
10375                 __ipr_remove(pdev);
10376                 return rc;
10377         }
10378
10379         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10380
10381         if (rc) {
10382                 __ipr_remove(pdev);
10383                 return rc;
10384         }
10385
10386         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10387                                    &ipr_trace_attr);
10388
10389         if (rc) {
10390                 scsi_remove_host(ioa_cfg->host);
10391                 __ipr_remove(pdev);
10392                 return rc;
10393         }
10394
10395         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10396                                    &ipr_dump_attr);
10397
10398         if (rc) {
10399                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10400                                       &ipr_trace_attr);
10401                 scsi_remove_host(ioa_cfg->host);
10402                 __ipr_remove(pdev);
10403                 return rc;
10404         }
10405
10406         scsi_scan_host(ioa_cfg->host);
10407         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10408
10409         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10410                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10411                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10412                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10413                 }
10414         }
10415
10416         schedule_work(&ioa_cfg->work_q);
10417         return 0;
10418 }
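/*
 * irq_poll (initialized above for SIS-64 adapters with multiple
 * vectors) is the block layer's NAPI-style budgeted polling: the hard
 * IRQ handler schedules the poller and the bulk of completion
 * processing runs in softirq context. Hedged sketch of a poll callback
 * (guarded out; the driver's real one is ipr_iopoll):
 */
#if 0
static int example_iopoll(struct irq_poll *iop, int budget)
{
	int done = 0;

	/* ...process up to 'budget' completions, counting them... */
	if (done < budget)
		irq_poll_complete(iop);	/* done; re-enable interrupts */
	return done;
}
#endif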
10419
10420 /**
10421  * ipr_shutdown - Shutdown handler.
10422  * @pdev:       pci device struct
10423  *
10424  * This function is invoked upon system shutdown/reboot. It will issue
10425  * a shutdown to the adapter to flush the write cache.
10426  *
10427  * Return value:
10428  *      none
10429  **/
10430 static void ipr_shutdown(struct pci_dev *pdev)
10431 {
10432         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10433         unsigned long lock_flags = 0;
10434         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10435         int i;
10436
10437         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10438         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10439                 ioa_cfg->iopoll_weight = 0;
10440                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10441                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10442         }
10443
10444         while (ioa_cfg->in_reset_reload) {
10445                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10446                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10447                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10448         }
10449
10450         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10451                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10452
10453         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10454         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10455         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10456         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10457                 ipr_free_irqs(ioa_cfg);
10458                 pci_disable_device(ioa_cfg->pdev);
10459         }
10460 }
10461
10462 static const struct pci_device_id ipr_pci_table[] = {
10463         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10464                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10465         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10466                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10467         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10468                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10469         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10470                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10471         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10472                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10473         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10474                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10475         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10476                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10477         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10478                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10479                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10480         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10481               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10482         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10483               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10484               IPR_USE_LONG_TRANSOP_TIMEOUT },
10485         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10486               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10487               IPR_USE_LONG_TRANSOP_TIMEOUT },
10488         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10489               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10490         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10491               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10492               IPR_USE_LONG_TRANSOP_TIMEOUT},
10493         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10494               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10495               IPR_USE_LONG_TRANSOP_TIMEOUT },
10496         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10497               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10498               IPR_USE_LONG_TRANSOP_TIMEOUT },
10499         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10500               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10501         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10502               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10503         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10504               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10505               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10506         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10507                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10508         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10509                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10510         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10511                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10512                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10513         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10514                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10515                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10516         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10517                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10518         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10519                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10520         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10521                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10522         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10523                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10524         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10525                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10526         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10527                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10528         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10529                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10530         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10531                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10532         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10533                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10534         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10535                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10536         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10537                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10538         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10539                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10540         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10541                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10542         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10543                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10544         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10545                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10546         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10547                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10548         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10549                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10550         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10551                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10552         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10553                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10554         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10555                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10556         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10557                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10558         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10559                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10560         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10561                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10562         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10563                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10564         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10565                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10566         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10567                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10568         { }
10569 };
10570 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
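/*
 * Each table entry above fills the positional fields of struct
 * pci_device_id: vendor, device, subvendor, subdevice, class,
 * class_mask and driver_data; the driver_data flag bits (e.g.
 * IPR_USE_LONG_TRANSOP_TIMEOUT) are read back in ipr_probe_ioa().
 * For clarity, the first entry rewritten with designated initializers
 * (equivalent, illustrative only, guarded out):
 */
#if 0
static const struct pci_device_id example_entry = {
	.vendor      = PCI_VENDOR_ID_MYLEX,
	.device      = PCI_DEVICE_ID_IBM_GEMSTONE,
	.subvendor   = PCI_VENDOR_ID_IBM,
	.subdevice   = IPR_SUBS_DEV_ID_5702,
	.driver_data = 0,
};
#endif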
10571
10572 static const struct pci_error_handlers ipr_err_handler = {
10573         .error_detected = ipr_pci_error_detected,
10574         .mmio_enabled = ipr_pci_mmio_enabled,
10575         .slot_reset = ipr_pci_slot_reset,
10576 };
10577
10578 static struct pci_driver ipr_driver = {
10579         .name = IPR_NAME,
10580         .id_table = ipr_pci_table,
10581         .probe = ipr_probe,
10582         .remove = ipr_remove,
10583         .shutdown = ipr_shutdown,
10584         .err_handler = &ipr_err_handler,
10585 };
10586
10587 /**
10588  * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
10589  *
10590  * Return value:
10591  *      none
10592  **/
10593 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10594 {
10595         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10596 }
10597
10598 /**
10599  * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         Notifier block
 * @event:      Notifier event
 * @buf:        Callback data (unused)
10600  *
10601  * Return value:
10602  *      NOTIFY_OK on success / NOTIFY_DONE on failure
10603  **/
10604 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10605 {
10606         struct ipr_cmnd *ipr_cmd;
10607         struct ipr_ioa_cfg *ioa_cfg;
10608         unsigned long flags = 0, driver_lock_flags;
10609
10610         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10611                 return NOTIFY_DONE;
10612
10613         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10614
10615         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10616                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10617                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10618                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10619                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10620                         continue;
10621                 }
10622
10623                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10624                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10625                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10626                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10627                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10628
10629                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10630                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10631         }
10632         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10633
10634         return NOTIFY_OK;
10635 }
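/*
 * ipr_halt() runs on the reboot notifier chain (registered in
 * ipr_init() below), giving every adapter a "prepare for normal
 * shutdown" IOA command before the system halts, powers off or
 * reboots. NOTIFY_DONE signals the event was not of interest;
 * NOTIFY_OK that it was handled.
 */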
10636
10637 static struct notifier_block ipr_notifier = {
10638         .notifier_call = ipr_halt,
10639 };
10640
10641 /**
10642  * ipr_init - Module entry point
10643  *
10644  * Return value:
10645  *      0 on success / negative value on failure
10646  **/
10647 static int __init ipr_init(void)
10648 {
10649         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10650                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10651
10652         register_reboot_notifier(&ipr_notifier);
10653         return pci_register_driver(&ipr_driver);
10654 }
10655
10656 /**
10657  * ipr_exit - Module unload
10658  *
10659  * Module unload entry point.
10660  *
10661  * Return value:
10662  *      none
10663  **/
10664 static void __exit ipr_exit(void)
10665 {
10666         unregister_reboot_notifier(&ipr_notifier);
10667         pci_unregister_driver(&ipr_driver);
10668 }
10669
10670 module_init(ipr_init);
10671 module_exit(ipr_exit);