/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

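/* Indexed by the max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */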
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to become operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
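
/*
 * Illustrative usage (not from the original source): the parameters above
 * are all set at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * The parameters declared with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) can also be changed at runtime through
 * /sys/module/ipr/parameters/.
 */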

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

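/*
 * Enclosure table. Judging from the table contents, each entry holds a
 * product ID pattern, a per-byte compare mask ('X' bytes are compared;
 * '*' is presumably a wildcard), and a max bus speed limit in MB/s; see
 * struct ipr_ses_table_entry in ipr.h for the field definitions.
 */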
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
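	/* Complete all stores to the trace entry before continuing */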
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function (called without the host lock held;
 *		compare ipr_lock_and_done)
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue to allocate the command block from
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
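	/* Read back to flush the posted MMIO writes above */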
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

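	/* Drop the host lock while sleeping; ipr_internal_cmd_done() wakes us */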
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

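/**
 * ipr_get_hrrq_index - Select an HRR queue index
 * @ioa_cfg:	ioa config struct
 *
 * Rotates round-robin over the HRR queues, reserving queue 0 for
 * initialization traffic when more than one queue is configured.
 *
 * Return value:
 *	hrrq index
 **/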
1096 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1097 {
1098         unsigned int hrrq;
1099
1100         if (ioa_cfg->hrrq_num == 1)
1101                 hrrq = 0;
1102         else {
1103                 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104                 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1105         }
1106         return hrrq;
1107 }
1108
1109 /**
1110  * ipr_send_hcam - Send an HCAM to the adapter.
1111  * @ioa_cfg:    ioa config struct
1112  * @type:               HCAM type
1113  * @hostrcb:    hostrcb struct
1114  *
1115  * This function will send a Host Controlled Async command to the adapter.
1116  * If HCAMs are currently not allowed to be issued to the adapter, it will
1117  * place the hostrcb on the free queue.
1118  *
1119  * Return value:
1120  *      none
1121  **/
1122 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123                           struct ipr_hostrcb *hostrcb)
1124 {
1125         struct ipr_cmnd *ipr_cmd;
1126         struct ipr_ioarcb *ioarcb;
1127
1128         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1129                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1130                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1131                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1132
1133                 ipr_cmd->u.hostrcb = hostrcb;
1134                 ioarcb = &ipr_cmd->ioarcb;
1135
1136                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138                 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139                 ioarcb->cmd_pkt.cdb[1] = type;
1140                 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141                 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1142
1143                 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144                                sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1145
1146                 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147                         ipr_cmd->done = ipr_process_ccn;
1148                 else
1149                         ipr_cmd->done = ipr_process_error;
1150
1151                 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1152
1153                 ipr_send_command(ipr_cmd);
1154         } else {
1155                 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1156         }
1157 }
1158
1159 /**
1160  * ipr_update_ata_class - Update the ata class in the resource entry
1161  * @res:        resource entry struct
1162  * @proto:      cfgte device bus protocol value
1163  *
1164  * Return value:
1165  *      none
1166  **/
1167 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1168 {
1169         switch (proto) {
1170         case IPR_PROTO_SATA:
1171         case IPR_PROTO_SAS_STP:
1172                 res->ata_class = ATA_DEV_ATA;
1173                 break;
1174         case IPR_PROTO_SATA_ATAPI:
1175         case IPR_PROTO_SAS_STP_ATAPI:
1176                 res->ata_class = ATA_DEV_ATAPI;
1177                 break;
1178         default:
1179                 res->ata_class = ATA_DEV_UNKNOWN;
1180                 break;
1181         };
1182 }
1183
1184 /**
1185  * ipr_init_res_entry - Initialize a resource entry struct.
1186  * @res:        resource entry struct
1187  * @cfgtew:     config table entry wrapper struct
1188  *
1189  * Return value:
1190  *      none
1191  **/
1192 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193                                struct ipr_config_table_entry_wrapper *cfgtew)
1194 {
1195         int found = 0;
1196         unsigned int proto;
1197         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198         struct ipr_resource_entry *gscsi_res = NULL;
1199
1200         res->needs_sync_complete = 0;
1201         res->in_erp = 0;
1202         res->add_to_ml = 0;
1203         res->del_from_ml = 0;
1204         res->resetting_device = 0;
1205         res->reset_occurred = 0;
1206         res->sdev = NULL;
1207         res->sata_port = NULL;
1208
1209         if (ioa_cfg->sis64) {
1210                 proto = cfgtew->u.cfgte64->proto;
1211                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1213                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1214                 res->type = cfgtew->u.cfgte64->res_type;
1215
1216                 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217                         sizeof(res->res_path));
1218
1219                 res->bus = 0;
1220                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221                         sizeof(res->dev_lun.scsi_lun));
1222                 res->lun = scsilun_to_int(&res->dev_lun);
1223
1224                 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225                         list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226                                 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1227                                         found = 1;
1228                                         res->target = gscsi_res->target;
1229                                         break;
1230                                 }
1231                         }
1232                         if (!found) {
1233                                 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234                                                                   ioa_cfg->max_devs_supported);
1235                                 set_bit(res->target, ioa_cfg->target_ids);
1236                         }
1237                 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238                         res->bus = IPR_IOAFP_VIRTUAL_BUS;
1239                         res->target = 0;
1240                 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241                         res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242                         res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243                                                           ioa_cfg->max_devs_supported);
1244                         set_bit(res->target, ioa_cfg->array_ids);
1245                 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246                         res->bus = IPR_VSET_VIRTUAL_BUS;
1247                         res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248                                                           ioa_cfg->max_devs_supported);
1249                         set_bit(res->target, ioa_cfg->vset_ids);
1250                 } else {
1251                         res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252                                                           ioa_cfg->max_devs_supported);
1253                         set_bit(res->target, ioa_cfg->target_ids);
1254                 }
1255         } else {
1256                 proto = cfgtew->u.cfgte->proto;
1257                 res->qmodel = IPR_QUEUEING_MODEL(res);
1258                 res->flags = cfgtew->u.cfgte->flags;
1259                 if (res->flags & IPR_IS_IOA_RESOURCE)
1260                         res->type = IPR_RES_TYPE_IOAFP;
1261                 else
1262                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1263
1264                 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265                 res->target = cfgtew->u.cfgte->res_addr.target;
1266                 res->lun = cfgtew->u.cfgte->res_addr.lun;
1267                 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1268         }
1269
1270         ipr_update_ata_class(res, proto);
1271 }
1272
1273 /**
1274  * ipr_is_same_device - Determine if two devices are the same.
1275  * @res:        resource entry struct
1276  * @cfgtew:     config table entry wrapper struct
1277  *
1278  * Return value:
1279  *      1 if the devices are the same / 0 otherwise
1280  **/
1281 static int ipr_is_same_device(struct ipr_resource_entry *res,
1282                               struct ipr_config_table_entry_wrapper *cfgtew)
1283 {
1284         if (res->ioa_cfg->sis64) {
1285                 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286                                         sizeof(cfgtew->u.cfgte64->dev_id)) &&
1287                         !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1288                                         sizeof(cfgtew->u.cfgte64->lun))) {
1289                         return 1;
1290                 }
1291         } else {
1292                 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293                     res->target == cfgtew->u.cfgte->res_addr.target &&
1294                     res->lun == cfgtew->u.cfgte->res_addr.lun)
1295                         return 1;
1296         }
1297
1298         return 0;
1299 }
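
/*
 * Usage sketch (hypothetical caller; the real lookups happen in the
 * config table processing code): walking the used-resource queue to
 * find the entry a config table record refers to.
 *
 *	list_for_each_entry(res, &ioa_cfg->used_res_q, queue)
 *		if (ipr_is_same_device(res, &cfgtew))
 *			return res;
 */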
1300
1301 /**
1302  * __ipr_format_res_path - Format the resource path for printing.
1303  * @res_path:   resource path
1304  * @buffer:     buffer
1305  * @len:        length of buffer provided
1306  *
1307  * Return value:
1308  *      pointer to buffer
1309  **/
1310 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1311 {
1312         int i;
1313         char *p = buffer;
1314
1315         *p = '\0';
1316         p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1317         for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318                 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1319
1320         return buffer;
1321 }
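
/*
 * For illustration: a res_path beginning { 0x00, 0x01, 0x03, 0xff }
 * formats as "00-01-03". The first byte is printed bare, each later
 * byte gets a '-' prefix, and 0xff terminates the walk.
 */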
1322
1323 /**
1324  * ipr_format_res_path - Format the resource path for printing.
1325  * @ioa_cfg:    ioa config struct
1326  * @res_path:   resource path
1327  * @buffer:     buffer
1328  * @len:        length of buffer provided
1329  *
1330  * Return value:
1331  *      pointer to buffer
1332  **/
1333 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334                                  u8 *res_path, char *buffer, int len)
1335 {
1336         char *p = buffer;
1337
1338         *p = '\0';
1339         p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340         __ipr_format_res_path(res_path, p, len - (p - buffer));
1341         return buffer;
1342 }
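
/*
 * Building on the example above, with host_no == 2 the full form is
 * "2/00-01-03". A minimal sketch (buffer name hypothetical):
 *
 *	char buf[IPR_MAX_RES_PATH_LENGTH];
 *
 *	ipr_format_res_path(ioa_cfg, res->res_path, buf, sizeof(buf));
 */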
1343
1344 /**
1345  * ipr_update_res_entry - Update the resource entry.
1346  * @res:        resource entry struct
1347  * @cfgtew:     config table entry wrapper struct
1348  *
1349  * Return value:
1350  *      none
1351  **/
1352 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353                                  struct ipr_config_table_entry_wrapper *cfgtew)
1354 {
1355         char buffer[IPR_MAX_RES_PATH_LENGTH];
1356         unsigned int proto;
1357         int new_path = 0;
1358
1359         if (res->ioa_cfg->sis64) {
1360                 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361                 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1362                 res->type = cfgtew->u.cfgte64->res_type;
1363
1364                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365                         sizeof(struct ipr_std_inq_data));
1366
1367                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368                 proto = cfgtew->u.cfgte64->proto;
1369                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373                         sizeof(res->dev_lun.scsi_lun));
1374
1375                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376                                         sizeof(res->res_path))) {
1377                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378                                 sizeof(res->res_path));
1379                         new_path = 1;
1380                 }
1381
1382                 if (res->sdev && new_path)
1383                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1384                                     ipr_format_res_path(res->ioa_cfg,
1385                                         res->res_path, buffer, sizeof(buffer)));
1386         } else {
1387                 res->flags = cfgtew->u.cfgte->flags;
1388                 if (res->flags & IPR_IS_IOA_RESOURCE)
1389                         res->type = IPR_RES_TYPE_IOAFP;
1390                 else
1391                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394                         sizeof(struct ipr_std_inq_data));
1395
1396                 res->qmodel = IPR_QUEUEING_MODEL(res);
1397                 proto = cfgtew->u.cfgte->proto;
1398                 res->res_handle = cfgtew->u.cfgte->res_handle;
1399         }
1400
1401         ipr_update_ata_class(res, proto);
1402 }
1403
1404 /**
1405  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406  *                        for the resource.
1407  * @res:        resource entry struct
1409  *
1410  * Return value:
1411  *      none
1412  **/
1413 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414 {
1415         struct ipr_resource_entry *gscsi_res = NULL;
1416         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418         if (!ioa_cfg->sis64)
1419                 return;
1420
1421         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422                 clear_bit(res->target, ioa_cfg->array_ids);
1423         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424                 clear_bit(res->target, ioa_cfg->vset_ids);
1425         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428                                 return;
1429                 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431         } else if (res->bus == 0)
1432                 clear_bit(res->target, ioa_cfg->target_ids);
1433 }
1434
1435 /**
1436  * ipr_handle_config_change - Handle a config change from the adapter
1437  * @ioa_cfg:    ioa config struct
1438  * @hostrcb:    hostrcb
1439  *
1440  * Return value:
1441  *      none
1442  **/
1443 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1444                                      struct ipr_hostrcb *hostrcb)
1445 {
1446         struct ipr_resource_entry *res = NULL;
1447         struct ipr_config_table_entry_wrapper cfgtew;
1448         __be32 cc_res_handle;
1449
1450         u32 is_ndn = 1;
1451
1452         if (ioa_cfg->sis64) {
1453                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455         } else {
1456                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458         }
1459
1460         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1461                 if (res->res_handle == cc_res_handle) {
1462                         is_ndn = 0;
1463                         break;
1464                 }
1465         }
1466
1467         if (is_ndn) {
1468                 if (list_empty(&ioa_cfg->free_res_q)) {
1469                         ipr_send_hcam(ioa_cfg,
1470                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471                                       hostrcb);
1472                         return;
1473                 }
1474
1475                 res = list_entry(ioa_cfg->free_res_q.next,
1476                                  struct ipr_resource_entry, queue);
1477
1478                 list_del(&res->queue);
1479                 ipr_init_res_entry(res, &cfgtew);
1480                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481         }
1482
1483         ipr_update_res_entry(res, &cfgtew);
1484
1485         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486                 if (res->sdev) {
1487                         res->del_from_ml = 1;
1488                         res->res_handle = IPR_INVALID_RES_HANDLE;
1489                         schedule_work(&ioa_cfg->work_q);
1490                 } else {
1491                         ipr_clear_res_target(res);
1492                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1493                 }
1494         } else if (!res->sdev || res->del_from_ml) {
1495                 res->add_to_ml = 1;
1496                 schedule_work(&ioa_cfg->work_q);
1497         }
1498
1499         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500 }
1501
1502 /**
1503  * ipr_process_ccn - Op done function for a CCN.
1504  * @ipr_cmd:    ipr command struct
1505  *
1506  * This function is the op done function for a configuration
1507  * change notification host controlled async from the adapter.
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513 {
1514         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1516         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1517
1518         list_del_init(&hostrcb->queue);
1519         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1520
1521         if (ioasc) {
1522                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1524                         dev_err(&ioa_cfg->pdev->dev,
1525                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528         } else {
1529                 ipr_handle_config_change(ioa_cfg, hostrcb);
1530         }
1531 }
1532
1533 /**
1534  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535  * @i:          index into buffer
1536  * @buf:                string to modify
1537  *
1538  * This function will strip all trailing whitespace, pad the end
1539  * of the string with a single space, and NUL-terminate the string.
1540  *
1541  * Return value:
1542  *      new length of string
1543  **/
1544 static int strip_and_pad_whitespace(int i, char *buf)
1545 {
1546         while (i && buf[i] == ' ')
1547                 i--;
1548         buf[i+1] = ' ';
1549         buf[i+2] = '\0';
1550         return i + 2;
1551 }
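
/*
 * Worked example: with buf = "IBM     " (vendor id padded to 8 bytes)
 * and i = 7, the loop backs up to the 'M' at index 2, writes one pad
 * space at index 3 and a '\0' at index 4, and returns 4. The caller
 * below uses that return as the copy offset for the next VPD field.
 */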
1552
1553 /**
1554  * ipr_log_vpd_compact - Log the passed VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                 struct ipr_vpd *vpd)
1564 {
1565         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566         int i = 0;
1567
1568         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578 }
1579
1580 /**
1581  * ipr_log_vpd - Log the passed VPD to the error log.
1582  * @vpd:                vendor/product id/sn struct
1583  *
1584  * Return value:
1585  *      none
1586  **/
1587 static void ipr_log_vpd(struct ipr_vpd *vpd)
1588 {
1589         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590                     + IPR_SERIAL_NUM_LEN];
1591
1592         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1594                IPR_PROD_ID_LEN);
1595         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596         ipr_err("Vendor/Product ID: %s\n", buffer);
1597
1598         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1599         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600         ipr_err("    Serial Number: %s\n", buffer);
1601 }
1602
1603 /**
1604  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605  * @prefix:             string to print at start of printk
1606  * @hostrcb:    hostrcb pointer
1607  * @vpd:                vendor/product id/sn/wwn struct
1608  *
1609  * Return value:
1610  *      none
1611  **/
1612 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613                                     struct ipr_ext_vpd *vpd)
1614 {
1615         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618 }
1619
1620 /**
1621  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622  * @vpd:                vendor/product id/sn/wwn struct
1623  *
1624  * Return value:
1625  *      none
1626  **/
1627 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628 {
1629         ipr_log_vpd(&vpd->vpd);
1630         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631                 be32_to_cpu(vpd->wwid[1]));
1632 }
1633
1634 /**
1635  * ipr_log_enhanced_cache_error - Log a cache error.
1636  * @ioa_cfg:    ioa config struct
1637  * @hostrcb:    hostrcb struct
1638  *
1639  * Return value:
1640  *      none
1641  **/
1642 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643                                          struct ipr_hostrcb *hostrcb)
1644 {
1645         struct ipr_hostrcb_type_12_error *error;
1646
1647         if (ioa_cfg->sis64)
1648                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649         else
1650                 error = &hostrcb->hcam.u.error.u.type_12_error;
1651
1652         ipr_err("-----Current Configuration-----\n");
1653         ipr_err("Cache Directory Card Information:\n");
1654         ipr_log_ext_vpd(&error->ioa_vpd);
1655         ipr_err("Adapter Card Information:\n");
1656         ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658         ipr_err("-----Expected Configuration-----\n");
1659         ipr_err("Cache Directory Card Information:\n");
1660         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661         ipr_err("Adapter Card Information:\n");
1662         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665                      be32_to_cpu(error->ioa_data[0]),
1666                      be32_to_cpu(error->ioa_data[1]),
1667                      be32_to_cpu(error->ioa_data[2]));
1668 }
1669
1670 /**
1671  * ipr_log_cache_error - Log a cache error.
1672  * @ioa_cfg:    ioa config struct
1673  * @hostrcb:    hostrcb struct
1674  *
1675  * Return value:
1676  *      none
1677  **/
1678 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679                                 struct ipr_hostrcb *hostrcb)
1680 {
1681         struct ipr_hostrcb_type_02_error *error =
1682                 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684         ipr_err("-----Current Configuration-----\n");
1685         ipr_err("Cache Directory Card Information:\n");
1686         ipr_log_vpd(&error->ioa_vpd);
1687         ipr_err("Adapter Card Information:\n");
1688         ipr_log_vpd(&error->cfc_vpd);
1689
1690         ipr_err("-----Expected Configuration-----\n");
1691         ipr_err("Cache Directory Card Information:\n");
1692         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1693         ipr_err("Adapter Card Information:\n");
1694         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1695
1696         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697                      be32_to_cpu(error->ioa_data[0]),
1698                      be32_to_cpu(error->ioa_data[1]),
1699                      be32_to_cpu(error->ioa_data[2]));
1700 }
1701
1702 /**
1703  * ipr_log_enhanced_config_error - Log a configuration error.
1704  * @ioa_cfg:    ioa config struct
1705  * @hostrcb:    hostrcb struct
1706  *
1707  * Return value:
1708  *      none
1709  **/
1710 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711                                           struct ipr_hostrcb *hostrcb)
1712 {
1713         int errors_logged, i;
1714         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715         struct ipr_hostrcb_type_13_error *error;
1716
1717         error = &hostrcb->hcam.u.error.u.type_13_error;
1718         errors_logged = be32_to_cpu(error->errors_logged);
1719
1720         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721                 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723         dev_entry = error->dev;
1724
1725         for (i = 0; i < errors_logged; i++, dev_entry++) {
1726                 ipr_err_separator;
1727
1728                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729                 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731                 ipr_err("-----New Device Information-----\n");
1732                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734                 ipr_err("Cache Directory Card Information:\n");
1735                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737                 ipr_err("Adapter Card Information:\n");
1738                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739         }
1740 }
1741
1742 /**
1743  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744  * @ioa_cfg:    ioa config struct
1745  * @hostrcb:    hostrcb struct
1746  *
1747  * Return value:
1748  *      none
1749  **/
1750 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751                                        struct ipr_hostrcb *hostrcb)
1752 {
1753         int errors_logged, i;
1754         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755         struct ipr_hostrcb_type_23_error *error;
1756         char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758         error = &hostrcb->hcam.u.error64.u.type_23_error;
1759         errors_logged = be32_to_cpu(error->errors_logged);
1760
1761         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762                 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764         dev_entry = error->dev;
1765
1766         for (i = 0; i < errors_logged; i++, dev_entry++) {
1767                 ipr_err_separator;
1768
1769                 ipr_err("Device %d : %s", i + 1,
1770                         __ipr_format_res_path(dev_entry->res_path,
1771                                               buffer, sizeof(buffer)));
1772                 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774                 ipr_err("-----New Device Information-----\n");
1775                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777                 ipr_err("Cache Directory Card Information:\n");
1778                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780                 ipr_err("Adapter Card Information:\n");
1781                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782         }
1783 }
1784
1785 /**
1786  * ipr_log_config_error - Log a configuration error.
1787  * @ioa_cfg:    ioa config struct
1788  * @hostrcb:    hostrcb struct
1789  *
1790  * Return value:
1791  *      none
1792  **/
1793 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794                                  struct ipr_hostrcb *hostrcb)
1795 {
1796         int errors_logged, i;
1797         struct ipr_hostrcb_device_data_entry *dev_entry;
1798         struct ipr_hostrcb_type_03_error *error;
1799
1800         error = &hostrcb->hcam.u.error.u.type_03_error;
1801         errors_logged = be32_to_cpu(error->errors_logged);
1802
1803         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804                 be32_to_cpu(error->errors_detected), errors_logged);
1805
1806         dev_entry = error->dev;
1807
1808         for (i = 0; i < errors_logged; i++, dev_entry++) {
1809                 ipr_err_separator;
1810
1811                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1812                 ipr_log_vpd(&dev_entry->vpd);
1813
1814                 ipr_err("-----New Device Information-----\n");
1815                 ipr_log_vpd(&dev_entry->new_vpd);
1816
1817                 ipr_err("Cache Directory Card Information:\n");
1818                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1819
1820                 ipr_err("Adapter Card Information:\n");
1821                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1822
1823                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824                         be32_to_cpu(dev_entry->ioa_data[0]),
1825                         be32_to_cpu(dev_entry->ioa_data[1]),
1826                         be32_to_cpu(dev_entry->ioa_data[2]),
1827                         be32_to_cpu(dev_entry->ioa_data[3]),
1828                         be32_to_cpu(dev_entry->ioa_data[4]));
1829         }
1830 }
1831
1832 /**
1833  * ipr_log_enhanced_array_error - Log an array configuration error.
1834  * @ioa_cfg:    ioa config struct
1835  * @hostrcb:    hostrcb struct
1836  *
1837  * Return value:
1838  *      none
1839  **/
1840 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841                                          struct ipr_hostrcb *hostrcb)
1842 {
1843         int i, num_entries;
1844         struct ipr_hostrcb_type_14_error *error;
1845         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848         error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850         ipr_err_separator;
1851
1852         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853                 error->protection_level,
1854                 ioa_cfg->host->host_no,
1855                 error->last_func_vset_res_addr.bus,
1856                 error->last_func_vset_res_addr.target,
1857                 error->last_func_vset_res_addr.lun);
1858
1859         ipr_err_separator;
1860
1861         array_entry = error->array_member;
1862         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1863                             ARRAY_SIZE(error->array_member));
1864
1865         for (i = 0; i < num_entries; i++, array_entry++) {
1866                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867                         continue;
1868
1869                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870                         ipr_err("Exposed Array Member %d:\n", i);
1871                 else
1872                         ipr_err("Array Member %d:\n", i);
1873
1874                 ipr_log_ext_vpd(&array_entry->vpd);
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880         }
1881 }
1882
1883 /**
1884  * ipr_log_array_error - Log an array configuration error.
1885  * @ioa_cfg:    ioa config struct
1886  * @hostrcb:    hostrcb struct
1887  *
1888  * Return value:
1889  *      none
1890  **/
1891 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892                                 struct ipr_hostrcb *hostrcb)
1893 {
1894         int i;
1895         struct ipr_hostrcb_type_04_error *error;
1896         struct ipr_hostrcb_array_data_entry *array_entry;
1897         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899         error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901         ipr_err_separator;
1902
1903         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904                 error->protection_level,
1905                 ioa_cfg->host->host_no,
1906                 error->last_func_vset_res_addr.bus,
1907                 error->last_func_vset_res_addr.target,
1908                 error->last_func_vset_res_addr.lun);
1909
1910         ipr_err_separator;
1911
1912         array_entry = error->array_member;
1913
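        /*
         * 18 entries in all: indices 0-9 come from array_member and
         * 10-17 from array_member2, hence the pointer switch at
         * i == 9 at the bottom of the loop.
         */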
1914         for (i = 0; i < 18; i++) {
1915                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1916                         continue;
1917
1918                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1919                         ipr_err("Exposed Array Member %d:\n", i);
1920                 else
1921                         ipr_err("Array Member %d:\n", i);
1922
1923                 ipr_log_vpd(&array_entry->vpd);
1924
1925                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927                                  "Expected Location");
1928
1929                 ipr_err_separator;
1930
1931                 if (i == 9)
1932                         array_entry = error->array_member2;
1933                 else
1934                         array_entry++;
1935         }
1936 }
1937
1938 /**
1939  * ipr_log_hex_data - Log additional hex IOA error data.
1940  * @ioa_cfg:    ioa config struct
1941  * @data:               IOA error data
1942  * @len:                data length
1943  *
1944  * Return value:
1945  *      none
1946  **/
1947 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1948 {
1949         int i;
1950
1951         if (len == 0)
1952                 return;
1953
1954         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
1957         for (i = 0; i < len / 4; i += 4) {
1958                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959                         be32_to_cpu(data[i]),
1960                         be32_to_cpu(data[i+1]),
1961                         be32_to_cpu(data[i+2]),
1962                         be32_to_cpu(data[i+3]));
1963         }
1964 }
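
/*
 * Sample output for a 32-byte buffer (word values illustrative):
 *
 *	00000000: 00000001 00000002 00000003 00000004
 *	00000010: 00000005 00000006 00000007 00000008
 *
 * The loop index counts __be32 words and steps by four, so the printed
 * byte offset (i * 4) advances 16 per line.
 */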
1965
1966 /**
1967  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968  * @ioa_cfg:    ioa config struct
1969  * @hostrcb:    hostrcb struct
1970  *
1971  * Return value:
1972  *      none
1973  **/
1974 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975                                             struct ipr_hostrcb *hostrcb)
1976 {
1977         struct ipr_hostrcb_type_17_error *error;
1978
1979         if (ioa_cfg->sis64)
1980                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981         else
1982                 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
1984         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1985         strim(error->failure_reason);
1986
1987         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1989         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1990         ipr_log_hex_data(ioa_cfg, error->data,
1991                          be32_to_cpu(hostrcb->hcam.length) -
1992                          (offsetof(struct ipr_hostrcb_error, u) +
1993                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1994 }
1995
1996 /**
1997  * ipr_log_dual_ioa_error - Log a dual adapter error.
1998  * @ioa_cfg:    ioa config struct
1999  * @hostrcb:    hostrcb struct
2000  *
2001  * Return value:
2002  *      none
2003  **/
2004 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005                                    struct ipr_hostrcb *hostrcb)
2006 {
2007         struct ipr_hostrcb_type_07_error *error;
2008
2009         error = &hostrcb->hcam.u.error.u.type_07_error;
2010         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2011         strim(error->failure_reason);
2012
2013         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014                      be32_to_cpu(hostrcb->hcam.u.error.prc));
2015         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
2016         ipr_log_hex_data(ioa_cfg, error->data,
2017                          be32_to_cpu(hostrcb->hcam.length) -
2018                          (offsetof(struct ipr_hostrcb_error, u) +
2019                           offsetof(struct ipr_hostrcb_type_07_error, data)));
2020 }
2021
2022 static const struct {
2023         u8 active;
2024         char *desc;
2025 } path_active_desc[] = {
2026         { IPR_PATH_NO_INFO, "Path" },
2027         { IPR_PATH_ACTIVE, "Active path" },
2028         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029 };
2030
2031 static const struct {
2032         u8 state;
2033         char *desc;
2034 } path_state_desc[] = {
2035         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036         { IPR_PATH_HEALTHY, "is healthy" },
2037         { IPR_PATH_DEGRADED, "is degraded" },
2038         { IPR_PATH_FAILED, "is failed" }
2039 };
2040
2041 /**
2042  * ipr_log_fabric_path - Log a fabric path error
2043  * @hostrcb:    hostrcb struct
2044  * @fabric:             fabric descriptor
2045  *
2046  * Return value:
2047  *      none
2048  **/
2049 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050                                 struct ipr_hostrcb_fabric_desc *fabric)
2051 {
2052         int i, j;
2053         u8 path_state = fabric->path_state;
2054         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055         u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067                                              path_active_desc[i].desc, path_state_desc[j].desc,
2068                                              fabric->ioa_port);
2069                         } else if (fabric->cascaded_expander == 0xff) {
2070                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071                                              path_active_desc[i].desc, path_state_desc[j].desc,
2072                                              fabric->ioa_port, fabric->phy);
2073                         } else if (fabric->phy == 0xff) {
2074                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075                                              path_active_desc[i].desc, path_state_desc[j].desc,
2076                                              fabric->ioa_port, fabric->cascaded_expander);
2077                         } else {
2078                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079                                              path_active_desc[i].desc, path_state_desc[j].desc,
2080                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081                         }
2082                         return;
2083                 }
2084         }
2085
2086         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088 }
2089
2090 /**
2091  * ipr_log64_fabric_path - Log a fabric path error
2092  * @hostrcb:    hostrcb struct
2093  * @fabric:             fabric descriptor
2094  *
2095  * Return value:
2096  *      none
2097  **/
2098 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099                                   struct ipr_hostrcb64_fabric_desc *fabric)
2100 {
2101         int i, j;
2102         u8 path_state = fabric->path_state;
2103         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104         u8 state = path_state & IPR_PATH_STATE_MASK;
2105         char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108                 if (path_active_desc[i].active != active)
2109                         continue;
2110
2111                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112                         if (path_state_desc[j].state != state)
2113                                 continue;
2114
2115                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116                                      path_active_desc[i].desc, path_state_desc[j].desc,
2117                                      ipr_format_res_path(hostrcb->ioa_cfg,
2118                                                 fabric->res_path,
2119                                                 buffer, sizeof(buffer)));
2120                         return;
2121                 }
2122         }
2123
2124         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2125                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126                                     buffer, sizeof(buffer)));
2127 }
2128
2129 static const struct {
2130         u8 type;
2131         char *desc;
2132 } path_type_desc[] = {
2133         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137 };
2138
2139 static const struct {
2140         u8 status;
2141         char *desc;
2142 } path_status_desc[] = {
2143         { IPR_PATH_CFG_NO_PROB, "Functional" },
2144         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145         { IPR_PATH_CFG_FAILED, "Failed" },
2146         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147         { IPR_PATH_NOT_DETECTED, "Missing" },
2148         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149 };
2150
2151 static const char *link_rate[] = {
2152         "unknown",
2153         "disabled",
2154         "phy reset problem",
2155         "spinup hold",
2156         "port selector",
2157         "unknown",
2158         "unknown",
2159         "unknown",
2160         "1.5Gbps",
2161         "3.0Gbps",
2162         "unknown",
2163         "unknown",
2164         "unknown",
2165         "unknown",
2166         "unknown",
2167         "unknown"
2168 };
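
/*
 * These indices follow the SAS negotiated link rate codes reported by
 * the adapter (0x8 is 1.5Gbps, 0x9 is 3.0Gbps); lookups below mask the
 * raw value with IPR_PHY_LINK_RATE_MASK so any out-of-range code lands
 * on an "unknown" slot.
 */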
2169
2170 /**
2171  * ipr_log_path_elem - Log a fabric path element.
2172  * @hostrcb:    hostrcb struct
2173  * @cfg:                fabric path element struct
2174  *
2175  * Return value:
2176  *      none
2177  **/
2178 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179                               struct ipr_hostrcb_config_element *cfg)
2180 {
2181         int i, j;
2182         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185         if (type == IPR_PATH_CFG_NOT_EXIST)
2186                 return;
2187
2188         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189                 if (path_type_desc[i].type != type)
2190                         continue;
2191
2192                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193                         if (path_status_desc[j].status != status)
2194                                 continue;
2195
2196                         if (type == IPR_PATH_CFG_IOA_PORT) {
2197                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198                                              path_status_desc[j].desc, path_type_desc[i].desc,
2199                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201                         } else {
2202                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2205                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207                                 } else if (cfg->cascaded_expander == 0xff) {
2208                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2210                                                      path_type_desc[i].desc, cfg->phy,
2211                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213                                 } else if (cfg->phy == 0xff) {
2214                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2216                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2217                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219                                 } else {
2220                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2222                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225                                 }
2226                         }
2227                         return;
2228                 }
2229         }
2230
2231         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235 }
2236
2237 /**
2238  * ipr_log64_path_elem - Log a fabric path element.
2239  * @hostrcb:    hostrcb struct
2240  * @cfg:                fabric path element struct
2241  *
2242  * Return value:
2243  *      none
2244  **/
2245 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246                                 struct ipr_hostrcb64_config_element *cfg)
2247 {
2248         int i, j;
2249         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252         char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255                 return;
2256
2257         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258                 if (path_type_desc[i].type != type)
2259                         continue;
2260
2261                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262                         if (path_status_desc[j].status != status)
2263                                 continue;
2264
2265                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266                                      path_status_desc[j].desc, path_type_desc[i].desc,
2267                                      ipr_format_res_path(hostrcb->ioa_cfg,
2268                                         cfg->res_path, buffer, sizeof(buffer)),
2269                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270                                         be32_to_cpu(cfg->wwid[0]),
2271                                         be32_to_cpu(cfg->wwid[1]));
2272                         return;
2273                 }
2274         }
2275         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276                      "WWN=%08X%08X\n", cfg->type_status,
2277                      ipr_format_res_path(hostrcb->ioa_cfg,
2278                         cfg->res_path, buffer, sizeof(buffer)),
2279                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2281 }
2282
2283 /**
2284  * ipr_log_fabric_error - Log a fabric error.
2285  * @ioa_cfg:    ioa config struct
2286  * @hostrcb:    hostrcb struct
2287  *
2288  * Return value:
2289  *      none
2290  **/
2291 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292                                  struct ipr_hostrcb *hostrcb)
2293 {
2294         struct ipr_hostrcb_type_20_error *error;
2295         struct ipr_hostrcb_fabric_desc *fabric;
2296         struct ipr_hostrcb_config_element *cfg;
2297         int i, add_len;
2298
2299         error = &hostrcb->hcam.u.error.u.type_20_error;
2300         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303         add_len = be32_to_cpu(hostrcb->hcam.length) -
2304                 (offsetof(struct ipr_hostrcb_error, u) +
2305                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308                 ipr_log_fabric_path(hostrcb, fabric);
2309                 for_each_fabric_cfg(fabric, cfg)
2310                         ipr_log_path_elem(hostrcb, cfg);
2311
2312                 add_len -= be16_to_cpu(fabric->length);
2313                 fabric = (struct ipr_hostrcb_fabric_desc *)
2314                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315         }
2316
2317         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2318 }
2319
2320 /**
2321  * ipr_log_sis64_array_error - Log a sis64 array error.
2322  * @ioa_cfg:    ioa config struct
2323  * @hostrcb:    hostrcb struct
2324  *
2325  * Return value:
2326  *      none
2327  **/
2328 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329                                       struct ipr_hostrcb *hostrcb)
2330 {
2331         int i, num_entries;
2332         struct ipr_hostrcb_type_24_error *error;
2333         struct ipr_hostrcb64_array_data_entry *array_entry;
2334         char buffer[IPR_MAX_RES_PATH_LENGTH];
2335         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337         error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339         ipr_err_separator;
2340
2341         ipr_err("RAID %s Array Configuration: %s\n",
2342                 error->protection_level,
2343                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344                         buffer, sizeof(buffer)));
2345
2346         ipr_err_separator;
2347
2348         array_entry = error->array_member;
2349         num_entries = min_t(u32, error->num_entries,
2350                             ARRAY_SIZE(error->array_member));
2351
2352         for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355                         continue;
2356
2357                 if (error->exposed_mode_adn == i)
2358                         ipr_err("Exposed Array Member %d:\n", i);
2359                 else
2360                         ipr_err("Array Member %d:\n", i);
2361
2363                 ipr_log_ext_vpd(&array_entry->vpd);
2364                 ipr_err("Current Location: %s\n",
2365                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366                                 buffer, sizeof(buffer)));
2367                 ipr_err("Expected Location: %s\n",
2368                          ipr_format_res_path(ioa_cfg,
2369                                 array_entry->expected_res_path,
2370                                 buffer, sizeof(buffer)));
2371
2372                 ipr_err_separator;
2373         }
2374 }
2375
2376 /**
2377  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378  * @ioa_cfg:    ioa config struct
2379  * @hostrcb:    hostrcb struct
2380  *
2381  * Return value:
2382  *      none
2383  **/
2384 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385                                        struct ipr_hostrcb *hostrcb)
2386 {
2387         struct ipr_hostrcb_type_30_error *error;
2388         struct ipr_hostrcb64_fabric_desc *fabric;
2389         struct ipr_hostrcb64_config_element *cfg;
2390         int i, add_len;
2391
2392         error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397         add_len = be32_to_cpu(hostrcb->hcam.length) -
2398                 (offsetof(struct ipr_hostrcb64_error, u) +
2399                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402                 ipr_log64_fabric_path(hostrcb, fabric);
2403                 for_each_fabric_cfg(fabric, cfg)
2404                         ipr_log64_path_elem(hostrcb, cfg);
2405
2406                 add_len -= be16_to_cpu(fabric->length);
2407                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409         }
2410
2411         ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2412 }
2413
2414 /**
2415  * ipr_log_generic_error - Log an adapter error.
2416  * @ioa_cfg:    ioa config struct
2417  * @hostrcb:    hostrcb struct
2418  *
2419  * Return value:
2420  *      none
2421  **/
2422 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2423                                   struct ipr_hostrcb *hostrcb)
2424 {
2425         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2426                          be32_to_cpu(hostrcb->hcam.length));
2427 }
2428
2429 /**
2430  * ipr_log_sis64_device_error - Log a sis64 device error.
2431  * @ioa_cfg:    ioa config struct
2432  * @hostrcb:    hostrcb struct
2433  *
2434  * Return value:
2435  *      none
2436  **/
2437 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2438                                          struct ipr_hostrcb *hostrcb)
2439 {
2440         struct ipr_hostrcb_type_21_error *error;
2441         char buffer[IPR_MAX_RES_PATH_LENGTH];
2442
2443         error = &hostrcb->hcam.u.error64.u.type_21_error;
2444
2445         ipr_err("-----Failing Device Information-----\n");
2446         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2447                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2448                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2449         ipr_err("Device Resource Path: %s\n",
2450                 __ipr_format_res_path(error->res_path,
2451                                       buffer, sizeof(buffer)));
2452         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2453         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2454         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2455         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2456         ipr_err("SCSI Sense Data:\n");
2457         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2458         ipr_err("SCSI Command Descriptor Block: \n");
2459         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2460
2461         ipr_err("Additional IOA Data:\n");
2462         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2463 }
2464
2465 /**
2466  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2467  * @ioasc:      IOASC
2468  *
2469  * This function will return the index into the ipr_error_table
2470  * for the specified IOASC. If the IOASC is not in the table,
2471  * 0 will be returned, which points to the entry used for unknown errors.
2472  *
2473  * Return value:
2474  *      index into the ipr_error_table
2475  **/
2476 static u32 ipr_get_error(u32 ioasc)
2477 {
2478         int i;
2479
2480         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2481                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2482                         return i;
2483
2484         return 0;
2485 }
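
/*
 * Minimal usage sketch: the IOASC is masked inside the lookup, so the
 * raw value from the IOASA can be passed directly, and index 0 is a
 * safe fallback for unrecognized codes.
 *
 *	error_index = ipr_get_error(ioasc);
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */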
2486
2487 /**
2488  * ipr_handle_log_data - Log an adapter error.
2489  * @ioa_cfg:    ioa config struct
2490  * @hostrcb:    hostrcb struct
2491  *
2492  * This function logs an adapter error to the system.
2493  *
2494  * Return value:
2495  *      none
2496  **/
2497 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2498                                 struct ipr_hostrcb *hostrcb)
2499 {
2500         u32 ioasc;
2501         int error_index;
2502         struct ipr_hostrcb_type_21_error *error;
2503
2504         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2505                 return;
2506
2507         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2508                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2509
2510         if (ioa_cfg->sis64)
2511                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2512         else
2513                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2514
2515         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2516             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2517                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2518                 scsi_report_bus_reset(ioa_cfg->host,
2519                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2520         }
2521
2522         error_index = ipr_get_error(ioasc);
2523
2524         if (!ipr_error_table[error_index].log_hcam)
2525                 return;
2526
2527         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2528             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2529                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2530
2531                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2532                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2533                                 return;
2534         }
2535
2536         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2537
2538         /* Set indication we have logged an error */
2539         ioa_cfg->errors_logged++;
2540
2541         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2542                 return;
2543         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2544                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2545
2546         switch (hostrcb->hcam.overlay_id) {
2547         case IPR_HOST_RCB_OVERLAY_ID_2:
2548                 ipr_log_cache_error(ioa_cfg, hostrcb);
2549                 break;
2550         case IPR_HOST_RCB_OVERLAY_ID_3:
2551                 ipr_log_config_error(ioa_cfg, hostrcb);
2552                 break;
2553         case IPR_HOST_RCB_OVERLAY_ID_4:
2554         case IPR_HOST_RCB_OVERLAY_ID_6:
2555                 ipr_log_array_error(ioa_cfg, hostrcb);
2556                 break;
2557         case IPR_HOST_RCB_OVERLAY_ID_7:
2558                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2559                 break;
2560         case IPR_HOST_RCB_OVERLAY_ID_12:
2561                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2562                 break;
2563         case IPR_HOST_RCB_OVERLAY_ID_13:
2564                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2565                 break;
2566         case IPR_HOST_RCB_OVERLAY_ID_14:
2567         case IPR_HOST_RCB_OVERLAY_ID_16:
2568                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2569                 break;
2570         case IPR_HOST_RCB_OVERLAY_ID_17:
2571                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2572                 break;
2573         case IPR_HOST_RCB_OVERLAY_ID_20:
2574                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2575                 break;
2576         case IPR_HOST_RCB_OVERLAY_ID_21:
2577                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2578                 break;
2579         case IPR_HOST_RCB_OVERLAY_ID_23:
2580                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2581                 break;
2582         case IPR_HOST_RCB_OVERLAY_ID_24:
2583         case IPR_HOST_RCB_OVERLAY_ID_26:
2584                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2585                 break;
2586         case IPR_HOST_RCB_OVERLAY_ID_30:
2587                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2588                 break;
2589         case IPR_HOST_RCB_OVERLAY_ID_1:
2590         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2591         default:
2592                 ipr_log_generic_error(ioa_cfg, hostrcb);
2593                 break;
2594         }
2595 }
2596
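/**
 * ipr_get_free_hostrcb - Get a free hostrcb, reclaiming one from the
 *                        report queue if the free queue is empty.
 * @ioa:        ioa config struct
 *
 * Return value:
 *      pointer to hostrcb
 **/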
2597 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2598 {
2599         struct ipr_hostrcb *hostrcb;
2600
2601         hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2602                                         struct ipr_hostrcb, queue);
2603
2604         if (unlikely(!hostrcb)) {
2605                 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2606                 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2607                                                 struct ipr_hostrcb, queue);
2608         }
2609
2610         list_del_init(&hostrcb->queue);
2611         return hostrcb;
2612 }
2613
2614 /**
2615  * ipr_process_error - Op done function for an adapter error log.
2616  * @ipr_cmd:    ipr command struct
2617  *
2618  * This function is the op done function for an error log host
2619  * controlled async from the adapter. It will log the error and
2620  * send the HCAM back to the adapter.
2621  *
2622  * Return value:
2623  *      none
2624  **/
2625 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2626 {
2627         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2629         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2630         u32 fd_ioasc;
2631
2632         if (ioa_cfg->sis64)
2633                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2634         else
2635                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2636
2637         list_del_init(&hostrcb->queue);
2638         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2639
2640         if (!ioasc) {
2641                 ipr_handle_log_data(ioa_cfg, hostrcb);
2642                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2643                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2644         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2645                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2646                 dev_err(&ioa_cfg->pdev->dev,
2647                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2648         }
2649
2650         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2651         schedule_work(&ioa_cfg->work_q);
2652         hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2653
2654         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2655 }
2656
2657 /**
2658  * ipr_timeout -  An internally generated op has timed out.
2659  * @ipr_cmd:    ipr command struct
2660  *
2661  * This function blocks host requests and initiates an
2662  * adapter reset.
2663  *
2664  * Return value:
2665  *      none
2666  **/
2667 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2668 {
2669         unsigned long lock_flags = 0;
2670         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672         ENTER;
2673         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675         ioa_cfg->errors_logged++;
2676         dev_err(&ioa_cfg->pdev->dev,
2677                 "Adapter being reset due to command timeout.\n");
2678
2679         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680                 ioa_cfg->sdt_state = GET_DUMP;
2681
2682         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686         LEAVE;
2687 }
2688
2689 /**
2690  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2691  * @ipr_cmd:    ipr command struct
2692  *
2693  * This function blocks host requests and initiates an
2694  * adapter reset.
2695  *
2696  * Return value:
2697  *      none
2698  **/
2699 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2700 {
2701         unsigned long lock_flags = 0;
2702         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2703
2704         ENTER;
2705         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2706
2707         ioa_cfg->errors_logged++;
2708         dev_err(&ioa_cfg->pdev->dev,
2709                 "Adapter timed out transitioning to operational.\n");
2710
2711         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2712                 ioa_cfg->sdt_state = GET_DUMP;
2713
2714         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2715                 if (ipr_fastfail)
2716                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2717                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2718         }
2719
2720         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2721         LEAVE;
2722 }
2723
2724 /**
2725  * ipr_find_ses_entry - Find matching SES in SES table
2726  * @res:        resource entry struct of SES
2727  *
2728  * Return value:
2729  *      pointer to SES table entry / NULL on failure
2730  **/
2731 static const struct ipr_ses_table_entry *
2732 ipr_find_ses_entry(struct ipr_resource_entry *res)
2733 {
2734         int i, j, matches;
2735         struct ipr_std_inq_vpids *vpids;
2736         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2737
2738         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2739                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2740                         if (ste->compare_product_id_byte[j] == 'X') {
2741                                 vpids = &res->std_inq_data.vpids;
2742                                 if (vpids->product_id[j] == ste->product_id[j])
2743                                         matches++;
2744                                 else
2745                                         break;
2746                         } else
2747                                 matches++;
2748                 }
2749
2750                 if (matches == IPR_PROD_ID_LEN)
2751                         return ste;
2752         }
2753
2754         return NULL;
2755 }
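/*
 * Editor's note: in the match loop above, an 'X' in
 * compare_product_id_byte marks a byte that must equal the inquiry
 * product ID; any other character is a don't-care.  A hypothetical
 * entry (values illustrative only; the real ipr_ses_table is defined
 * earlier in this file):
 */
static const struct ipr_ses_table_entry ipr_ses_example = {
	.product_id		 = "HSBP07M P U2SCSI",
	.compare_product_id_byte = "XXXXXXX*XXXXXXXX",	/* byte 7 ignored */
	.max_bus_speed_limit	 = 80,	/* used by ipr_get_max_scsi_speed() */
};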
2756
2757 /**
2758  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2759  * @ioa_cfg:    ioa config struct
2760  * @bus:                SCSI bus
2761  * @bus_width:  bus width
2762  *
2763  * Return value:
2764  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2765  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2766  *      MB/sec is twice the clock rate in MHz (e.g. a wide-enabled
2767  *      bus clocked at 160 MHz transfers at most 320 MB/sec).
2768  **/
2769 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2770 {
2771         struct ipr_resource_entry *res;
2772         const struct ipr_ses_table_entry *ste;
2773         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2774
2775         /* Loop through each config table entry in the config table buffer */
2776         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2777                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2778                         continue;
2779
2780                 if (bus != res->bus)
2781                         continue;
2782
2783                 if (!(ste = ipr_find_ses_entry(res)))
2784                         continue;
2785
2786                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2787         }
2788
2789         return max_xfer_rate;
2790 }
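/*
 * Editor's note: worked example for the computation above, with
 * illustrative numbers.  If a matching SES entry reports
 * max_bus_speed_limit == 80 and the bus is 16 bits wide, the result
 * is (80 * 10) / (16 / 8) == 400, i.e. 40 MHz in the 100 kHz units
 * this function returns.
 */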
2791
2792 /**
2793  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2794  * @ioa_cfg:            ioa config struct
2795  * @max_delay:          max delay in micro-seconds to wait
2796  *
2797  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2798  *
2799  * Return value:
2800  *      0 on success / other on failure
2801  **/
2802 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2803 {
2804         volatile u32 pcii_reg;
2805         int delay = 1;
2806
2807         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2808         while (delay < max_delay) {
2809                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2810
2811                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2812                         return 0;
2813
2814                 /* udelay cannot be used if delay is more than a few milliseconds */
2815                 if ((delay / 1000) > MAX_UDELAY_MS)
2816                         mdelay(delay / 1000);
2817                 else
2818                         udelay(delay);
2819
2820                 delay += delay;
2821         }
2822         return -EIO;
2823 }
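/*
 * Editor's note: the poll above doubles its delay on every pass
 * (1, 2, 4, ... microseconds), so the total time spent waiting
 * before returning -EIO is bounded by roughly 2 * max_delay
 * microseconds.
 */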
2824
2825 /**
2826  * ipr_get_sis64_dump_data_section - Dump IOA memory
2827  * @ioa_cfg:                    ioa config struct
2828  * @start_addr:                 adapter address to dump
2829  * @dest:                       destination kernel buffer
2830  * @length_in_words:            length to dump in 4 byte words
2831  *
2832  * Return value:
2833  *      0 on success
2834  **/
2835 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2836                                            u32 start_addr,
2837                                            __be32 *dest, u32 length_in_words)
2838 {
2839         int i;
2840
2841         for (i = 0; i < length_in_words; i++) {
2842                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2843                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2844                 dest++;
2845         }
2846
2847         return 0;
2848 }
2849
2850 /**
2851  * ipr_get_ldump_data_section - Dump IOA memory
2852  * @ioa_cfg:                    ioa config struct
2853  * @start_addr:                 adapter address to dump
2854  * @dest:                       destination kernel buffer
2855  * @length_in_words:            length to dump in 4 byte words
2856  *
2857  * Return value:
2858  *      0 on success / -EIO on failure
2859  **/
2860 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2861                                       u32 start_addr,
2862                                       __be32 *dest, u32 length_in_words)
2863 {
2864         volatile u32 temp_pcii_reg;
2865         int i, delay = 0;
2866
2867         if (ioa_cfg->sis64)
2868                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2869                                                        dest, length_in_words);
2870
2871         /* Write IOA interrupt reg starting LDUMP state  */
2872         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2873                ioa_cfg->regs.set_uproc_interrupt_reg32);
2874
2875         /* Wait for IO debug acknowledge */
2876         if (ipr_wait_iodbg_ack(ioa_cfg,
2877                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2878                 dev_err(&ioa_cfg->pdev->dev,
2879                         "IOA dump long data transfer timeout\n");
2880                 return -EIO;
2881         }
2882
2883         /* Signal LDUMP interlocked - clear IO debug ack */
2884         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2885                ioa_cfg->regs.clr_interrupt_reg);
2886
2887         /* Write Mailbox with starting address */
2888         writel(start_addr, ioa_cfg->ioa_mailbox);
2889
2890         /* Signal address valid - clear IOA Reset alert */
2891         writel(IPR_UPROCI_RESET_ALERT,
2892                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2893
2894         for (i = 0; i < length_in_words; i++) {
2895                 /* Wait for IO debug acknowledge */
2896                 if (ipr_wait_iodbg_ack(ioa_cfg,
2897                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2898                         dev_err(&ioa_cfg->pdev->dev,
2899                                 "IOA dump short data transfer timeout\n");
2900                         return -EIO;
2901                 }
2902
2903                 /* Read data from mailbox and increment destination pointer */
2904                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2905                 dest++;
2906
2907                 /* For all but the last word of data, signal data received */
2908                 if (i < (length_in_words - 1)) {
2909                         /* Signal dump data received - Clear IO debug Ack */
2910                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2911                                ioa_cfg->regs.clr_interrupt_reg);
2912                 }
2913         }
2914
2915         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2916         writel(IPR_UPROCI_RESET_ALERT,
2917                ioa_cfg->regs.set_uproc_interrupt_reg32);
2918
2919         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2920                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2921
2922         /* Signal dump data received - Clear IO debug Ack */
2923         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2924                ioa_cfg->regs.clr_interrupt_reg);
2925
2926         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2927         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2928                 temp_pcii_reg =
2929                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2930
2931                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2932                         return 0;
2933
2934                 udelay(10);
2935                 delay += 10;
2936         }
2937
2938         return 0;
2939 }
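/*
 * Editor's note: summary of the LDUMP handshake implemented above
 * (non-sis64 adapters), as read from the code:
 *
 *   1. Set RESET_ALERT + IO_DEBUG_ALERT to enter the LDUMP state.
 *   2. Wait for IO_DEBUG_ACKNOWLEDGE, then clear it.
 *   3. Write the start address to the mailbox and clear RESET_ALERT
 *      to mark the address valid.
 *   4. For each word: wait for the ack, read the mailbox, and clear
 *      the ack (except after the final word).
 *   5. Set RESET_ALERT, clear IO_DEBUG_ALERT, clear the ack, and
 *      poll until the IOA deasserts RESET_ALERT to exit LDUMP.
 */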
2940
2941 #ifdef CONFIG_SCSI_IPR_DUMP
2942 /**
2943  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2944  * @ioa_cfg:            ioa config struct
2945  * @pci_address:        adapter address
2946  * @length:             length of data to copy
2947  *
2948  * Copy data from PCI adapter to kernel buffer.
2949  * Note: length MUST be a 4 byte multiple
2950  * Return value:
2951  *      number of bytes copied (fewer than length on failure)
2952  **/
2953 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2954                         unsigned long pci_address, u32 length)
2955 {
2956         int bytes_copied = 0;
2957         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2958         __be32 *page;
2959         unsigned long lock_flags = 0;
2960         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2961
2962         if (ioa_cfg->sis64)
2963                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2964         else
2965                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2966
2967         while (bytes_copied < length &&
2968                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2969                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2970                     ioa_dump->page_offset == 0) {
2971                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2972
2973                         if (!page) {
2974                                 ipr_trace;
2975                                 return bytes_copied;
2976                         }
2977
2978                         ioa_dump->page_offset = 0;
2979                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2980                         ioa_dump->next_page_index++;
2981                 } else
2982                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2983
2984                 rem_len = length - bytes_copied;
2985                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2986                 cur_len = min(rem_len, rem_page_len);
2987
2988                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2990                         rc = -EIO;
2991                 } else {
2992                         rc = ipr_get_ldump_data_section(ioa_cfg,
2993                                                         pci_address + bytes_copied,
2994                                                         &page[ioa_dump->page_offset / 4],
2995                                                         (cur_len / sizeof(u32)));
2996                 }
2997                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2998
2999                 if (!rc) {
3000                         ioa_dump->page_offset += cur_len;
3001                         bytes_copied += cur_len;
3002                 } else {
3003                         ipr_trace;
3004                         break;
3005                 }
3006                 schedule();
3007         }
3008
3009         return bytes_copied;
3010 }
3011
3012 /**
3013  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3014  * @hdr:        dump entry header struct
3015  *
3016  * Return value:
3017  *      nothing
3018  **/
3019 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3020 {
3021         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3022         hdr->num_elems = 1;
3023         hdr->offset = sizeof(*hdr);
3024         hdr->status = IPR_DUMP_STATUS_SUCCESS;
3025 }
3026
3027 /**
3028  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3029  * @ioa_cfg:    ioa config struct
3030  * @driver_dump:        driver dump struct
3031  *
3032  * Return value:
3033  *      nothing
3034  **/
3035 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3036                                    struct ipr_driver_dump *driver_dump)
3037 {
3038         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3039
3040         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3041         driver_dump->ioa_type_entry.hdr.len =
3042                 sizeof(struct ipr_dump_ioa_type_entry) -
3043                 sizeof(struct ipr_dump_entry_header);
3044         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3045         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3046         driver_dump->ioa_type_entry.type = ioa_cfg->type;
3047         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3048                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3049                 ucode_vpd->minor_release[1];
3050         driver_dump->hdr.num_entries++;
3051 }
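/*
 * Editor's note: the fw_version word built above packs four VPD bytes
 * most-significant first.  A hypothetical decode helper showing the
 * layout (the same order ipr_show_fw_version() prints later in this
 * file):
 */
static inline void ipr_decode_fw_version(u32 fw_version, u8 out[4])
{
	out[0] = fw_version >> 24;	/* major_release */
	out[1] = fw_version >> 16;	/* card_type */
	out[2] = fw_version >> 8;	/* minor_release[0] */
	out[3] = fw_version;		/* minor_release[1] */
}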
3052
3053 /**
3054  * ipr_dump_version_data - Fill in the driver version in the dump.
3055  * @ioa_cfg:    ioa config struct
3056  * @driver_dump:        driver dump struct
3057  *
3058  * Return value:
3059  *      nothing
3060  **/
3061 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3062                                   struct ipr_driver_dump *driver_dump)
3063 {
3064         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3065         driver_dump->version_entry.hdr.len =
3066                 sizeof(struct ipr_dump_version_entry) -
3067                 sizeof(struct ipr_dump_entry_header);
3068         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3069         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3070         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3071         driver_dump->hdr.num_entries++;
3072 }
3073
3074 /**
3075  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3076  * @ioa_cfg:    ioa config struct
3077  * @driver_dump:        driver dump struct
3078  *
3079  * Return value:
3080  *      nothing
3081  **/
3082 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3083                                    struct ipr_driver_dump *driver_dump)
3084 {
3085         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3086         driver_dump->trace_entry.hdr.len =
3087                 sizeof(struct ipr_dump_trace_entry) -
3088                 sizeof(struct ipr_dump_entry_header);
3089         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3090         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3091         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3092         driver_dump->hdr.num_entries++;
3093 }
3094
3095 /**
3096  * ipr_dump_location_data - Fill in the IOA location in the dump.
3097  * @ioa_cfg:    ioa config struct
3098  * @driver_dump:        driver dump struct
3099  *
3100  * Return value:
3101  *      nothing
3102  **/
3103 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3104                                    struct ipr_driver_dump *driver_dump)
3105 {
3106         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3107         driver_dump->location_entry.hdr.len =
3108                 sizeof(struct ipr_dump_location_entry) -
3109                 sizeof(struct ipr_dump_entry_header);
3110         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3111         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3112         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3113         driver_dump->hdr.num_entries++;
3114 }
3115
3116 /**
3117  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3118  * @ioa_cfg:    ioa config struct
3119  * @dump:               dump struct
3120  *
3121  * Return value:
3122  *      nothing
3123  **/
3124 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3125 {
3126         unsigned long start_addr, sdt_word;
3127         unsigned long lock_flags = 0;
3128         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3129         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3130         u32 num_entries, max_num_entries, start_off, end_off;
3131         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3132         struct ipr_sdt *sdt;
3133         int valid = 1;
3134         int i;
3135
3136         ENTER;
3137
3138         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3139
3140         if (ioa_cfg->sdt_state != READ_DUMP) {
3141                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142                 return;
3143         }
3144
3145         if (ioa_cfg->sis64) {
3146                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3147                 ssleep(IPR_DUMP_DELAY_SECONDS);
3148                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3149         }
3150
3151         start_addr = readl(ioa_cfg->ioa_mailbox);
3152
3153         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3154                 dev_err(&ioa_cfg->pdev->dev,
3155                         "Invalid dump table format: %lx\n", start_addr);
3156                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3157                 return;
3158         }
3159
3160         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3161
3162         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3163
3164         /* Initialize the overall dump header */
3165         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3166         driver_dump->hdr.num_entries = 1;
3167         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3168         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3169         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3170         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3171
3172         ipr_dump_version_data(ioa_cfg, driver_dump);
3173         ipr_dump_location_data(ioa_cfg, driver_dump);
3174         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3175         ipr_dump_trace_data(ioa_cfg, driver_dump);
3176
3177         /* Update dump_header */
3178         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3179
3180         /* IOA Dump entry */
3181         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3182         ioa_dump->hdr.len = 0;
3183         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3184         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3185
3186         /* The first entries in the sdt are a list of dump addresses and
3187          * lengths used to gather the real dump data.  sdt points to the
3188          * IOA-generated dump table; dump data will be extracted based on
3189          * the entries in this table. */
3190         sdt = &ioa_dump->sdt;
3191
3192         if (ioa_cfg->sis64) {
3193                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3194                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3195         } else {
3196                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3197                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3198         }
3199
3200         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3201                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3202         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3203                                         bytes_to_copy / sizeof(__be32));
3204
3205         /* Smart Dump table is ready to use and the first entry is valid */
3206         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3207             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3208                 dev_err(&ioa_cfg->pdev->dev,
3209                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3210                         rc, be32_to_cpu(sdt->hdr.state));
3211                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3212                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3213                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3214                 return;
3215         }
3216
3217         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3218
3219         if (num_entries > max_num_entries)
3220                 num_entries = max_num_entries;
3221
3222         /* Update dump length to the actual data to be copied */
3223         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3224         if (ioa_cfg->sis64)
3225                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3226         else
3227                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3228
3229         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231         for (i = 0; i < num_entries; i++) {
3232                 if (ioa_dump->hdr.len > max_dump_size) {
3233                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3234                         break;
3235                 }
3236
3237                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3238                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3239                         if (ioa_cfg->sis64)
3240                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3241                         else {
3242                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3243                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3244
3245                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3246                                         bytes_to_copy = end_off - start_off;
3247                                 else
3248                                         valid = 0;
3249                         }
3250                         if (valid) {
3251                                 if (bytes_to_copy > max_dump_size) {
3252                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3253                                         continue;
3254                                 }
3255
3256                                 /* Copy data from adapter to driver buffers */
3257                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3258                                                             bytes_to_copy);
3259
3260                                 ioa_dump->hdr.len += bytes_copied;
3261
3262                                 if (bytes_copied != bytes_to_copy) {
3263                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3264                                         break;
3265                                 }
3266                         }
3267                 }
3268         }
3269
3270         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3271
3272         /* Update dump_header */
3273         driver_dump->hdr.len += ioa_dump->hdr.len;
3274         wmb();
3275         ioa_cfg->sdt_state = DUMP_OBTAINED;
3276         LEAVE;
3277 }
3278
3279 #else
3280 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3281 #endif
3282
3283 /**
3284  * ipr_release_dump - Free adapter dump memory
3285  * @kref:       kref struct
3286  *
3287  * Return value:
3288  *      nothing
3289  **/
3290 static void ipr_release_dump(struct kref *kref)
3291 {
3292         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3293         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3294         unsigned long lock_flags = 0;
3295         int i;
3296
3297         ENTER;
3298         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3299         ioa_cfg->dump = NULL;
3300         ioa_cfg->sdt_state = INACTIVE;
3301         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3302
3303         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3304                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3305
3306         vfree(dump->ioa_dump.ioa_data);
3307         kfree(dump);
3308         LEAVE;
3309 }
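/*
 * Editor's note: reference-counting pattern for the dump object, as
 * used by ipr_worker_thread() below: take kref_get() while holding
 * the host lock, drop the lock to do the slow copy, then kref_put()
 * with ipr_release_dump() as the release function.  The last put
 * invokes ipr_release_dump(), which detaches the dump from the
 * ioa_cfg and frees all of its pages.
 */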
3310
3311 /**
3312  * ipr_worker_thread - Worker thread
3313  * @work:               work struct
3314  *
3315  * Called at task level from a work thread. This function takes care
3316  * of adding and removing devices from the mid-layer as configuration
3317  * changes are detected by the adapter.
3318  *
3319  * Return value:
3320  *      nothing
3321  **/
3322 static void ipr_worker_thread(struct work_struct *work)
3323 {
3324         unsigned long lock_flags;
3325         struct ipr_resource_entry *res;
3326         struct scsi_device *sdev;
3327         struct ipr_dump *dump;
3328         struct ipr_ioa_cfg *ioa_cfg =
3329                 container_of(work, struct ipr_ioa_cfg, work_q);
3330         u8 bus, target, lun;
3331         int did_work;
3332
3333         ENTER;
3334         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335
3336         if (ioa_cfg->sdt_state == READ_DUMP) {
3337                 dump = ioa_cfg->dump;
3338                 if (!dump) {
3339                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3340                         return;
3341                 }
3342                 kref_get(&dump->kref);
3343                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3344                 ipr_get_ioa_dump(ioa_cfg, dump);
3345                 kref_put(&dump->kref, ipr_release_dump);
3346
3347                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3349                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3350                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3351                 return;
3352         }
3353
3354         if (!ioa_cfg->scan_enabled) {
3355                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356                 return;
3357         }
3358
3359 restart:
3360         do {
3361                 did_work = 0;
3362                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3363                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3364                         return;
3365                 }
3366
3367                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3368                         if (res->del_from_ml && res->sdev) {
3369                                 did_work = 1;
3370                                 sdev = res->sdev;
3371                                 if (!scsi_device_get(sdev)) {
3372                                         if (!res->add_to_ml)
3373                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3374                                         else
3375                                                 res->del_from_ml = 0;
3376                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377                                         scsi_remove_device(sdev);
3378                                         scsi_device_put(sdev);
3379                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3380                                 }
3381                                 break;
3382                         }
3383                 }
3384         } while (did_work);
3385
3386         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3387                 if (res->add_to_ml) {
3388                         bus = res->bus;
3389                         target = res->target;
3390                         lun = res->lun;
3391                         res->add_to_ml = 0;
3392                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3394                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3395                         goto restart;
3396                 }
3397         }
3398
3399         ioa_cfg->scan_done = 1;
3400         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3401         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3402         LEAVE;
3403 }
3404
3405 #ifdef CONFIG_SCSI_IPR_TRACE
3406 /**
3407  * ipr_read_trace - Dump the adapter trace
3408  * @filp:               open sysfs file
3409  * @kobj:               kobject struct
3410  * @bin_attr:           bin_attribute struct
3411  * @buf:                buffer
3412  * @off:                offset
3413  * @count:              buffer size
3414  *
3415  * Return value:
3416  *      number of bytes printed to buffer
3417  **/
3418 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3419                               struct bin_attribute *bin_attr,
3420                               char *buf, loff_t off, size_t count)
3421 {
3422         struct device *dev = container_of(kobj, struct device, kobj);
3423         struct Scsi_Host *shost = class_to_shost(dev);
3424         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3425         unsigned long lock_flags = 0;
3426         ssize_t ret;
3427
3428         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3429         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3430                                 IPR_TRACE_SIZE);
3431         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432
3433         return ret;
3434 }
3435
3436 static struct bin_attribute ipr_trace_attr = {
3437         .attr = {
3438                 .name = "trace",
3439                 .mode = S_IRUGO,
3440         },
3441         .size = 0,
3442         .read = ipr_read_trace,
3443 };
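/*
 * Editor's note: assuming this bin attribute is registered on the
 * Scsi_Host class device (as done elsewhere in this driver), the
 * trace buffer is readable from user space at a path of the form
 * /sys/class/scsi_host/hostN/trace, e.g. via
 * "dd if=/sys/class/scsi_host/host0/trace of=ipr_trace.bin".
 */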
3444 #endif
3445
3446 /**
3447  * ipr_show_fw_version - Show the firmware version
3448  * @dev:        class device struct
3449  * @buf:        buffer
3450  *
3451  * Return value:
3452  *      number of bytes printed to buffer
3453  **/
3454 static ssize_t ipr_show_fw_version(struct device *dev,
3455                                    struct device_attribute *attr, char *buf)
3456 {
3457         struct Scsi_Host *shost = class_to_shost(dev);
3458         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3459         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3460         unsigned long lock_flags = 0;
3461         int len;
3462
3463         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3464         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3465                        ucode_vpd->major_release, ucode_vpd->card_type,
3466                        ucode_vpd->minor_release[0],
3467                        ucode_vpd->minor_release[1]);
3468         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3469         return len;
3470 }
3471
3472 static struct device_attribute ipr_fw_version_attr = {
3473         .attr = {
3474                 .name =         "fw_version",
3475                 .mode =         S_IRUGO,
3476         },
3477         .show = ipr_show_fw_version,
3478 };
3479
3480 /**
3481  * ipr_show_log_level - Show the adapter's error logging level
3482  * @dev:        class device struct
3483  * @buf:        buffer
3484  *
3485  * Return value:
3486  *      number of bytes printed to buffer
3487  **/
3488 static ssize_t ipr_show_log_level(struct device *dev,
3489                                    struct device_attribute *attr, char *buf)
3490 {
3491         struct Scsi_Host *shost = class_to_shost(dev);
3492         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3493         unsigned long lock_flags = 0;
3494         int len;
3495
3496         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3497         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3498         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3499         return len;
3500 }
3501
3502 /**
3503  * ipr_store_log_level - Change the adapter's error logging level
3504  * @dev:        class device struct
3505  * @buf:        buffer
3506  *
3507  * Return value:
3508  *      number of bytes consumed from buffer
3509  **/
3510 static ssize_t ipr_store_log_level(struct device *dev,
3511                                    struct device_attribute *attr,
3512                                    const char *buf, size_t count)
3513 {
3514         struct Scsi_Host *shost = class_to_shost(dev);
3515         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3516         unsigned long lock_flags = 0;
3517
3518         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3519         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3520         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3521         return strlen(buf);
3522 }
3523
3524 static struct device_attribute ipr_log_level_attr = {
3525         .attr = {
3526                 .name =         "log_level",
3527                 .mode =         S_IRUGO | S_IWUSR,
3528         },
3529         .show = ipr_show_log_level,
3530         .store = ipr_store_log_level
3531 };
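/*
 * Editor's note: ipr_store_log_level() above uses the old
 * simple_strtoul() interface, which silently ignores trailing junk.
 * A hypothetical sketch of the same store using the checked
 * kstrtou32() helper instead (name and behavior are the editor's,
 * not the driver's):
 */
static ssize_t ipr_store_log_level_checked(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	u32 level;

	if (kstrtou32(buf, 10, &level))
		return -EINVAL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = level;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}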
3532
3533 /**
3534  * ipr_store_diagnostics - IOA Diagnostics interface
3535  * @dev:        device struct
3536  * @buf:        buffer
3537  * @count:      buffer size
3538  *
3539  * This function will reset the adapter and wait a reasonable
3540  * amount of time for any errors that the adapter might log.
3541  *
3542  * Return value:
3543  *      count on success / other on failure
3544  **/
3545 static ssize_t ipr_store_diagnostics(struct device *dev,
3546                                      struct device_attribute *attr,
3547                                      const char *buf, size_t count)
3548 {
3549         struct Scsi_Host *shost = class_to_shost(dev);
3550         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3551         unsigned long lock_flags = 0;
3552         int rc = count;
3553
3554         if (!capable(CAP_SYS_ADMIN))
3555                 return -EACCES;
3556
3557         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3558         while (ioa_cfg->in_reset_reload) {
3559                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3560                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3561                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3562         }
3563
3564         ioa_cfg->errors_logged = 0;
3565         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3566
3567         if (ioa_cfg->in_reset_reload) {
3568                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3569                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3570
3571                 /* Wait for a second for any errors to be logged */
3572                 msleep(1000);
3573         } else {
3574                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3575                 return -EIO;
3576         }
3577
3578         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3579         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3580                 rc = -EIO;
3581         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582
3583         return rc;
3584 }
3585
3586 static struct device_attribute ipr_diagnostics_attr = {
3587         .attr = {
3588                 .name =         "run_diagnostics",
3589                 .mode =         S_IWUSR,
3590         },
3591         .store = ipr_store_diagnostics
3592 };
3593
3594 /**
3595  * ipr_show_adapter_state - Show the adapter's state
3596  * @dev:        device struct
3597  * @buf:        buffer
3598  *
3599  * Return value:
3600  *      number of bytes printed to buffer
3601  **/
3602 static ssize_t ipr_show_adapter_state(struct device *dev,
3603                                       struct device_attribute *attr, char *buf)
3604 {
3605         struct Scsi_Host *shost = class_to_shost(dev);
3606         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3607         unsigned long lock_flags = 0;
3608         int len;
3609
3610         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3611         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3612                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3613         else
3614                 len = snprintf(buf, PAGE_SIZE, "online\n");
3615         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3616         return len;
3617 }
3618
3619 /**
3620  * ipr_store_adapter_state - Change adapter state
3621  * @dev:        device struct
3622  * @buf:        buffer
3623  * @count:      buffer size
3624  *
3625  * This function will change the adapter's state.
3626  *
3627  * Return value:
3628  *      count on success / other on failure
3629  **/
3630 static ssize_t ipr_store_adapter_state(struct device *dev,
3631                                        struct device_attribute *attr,
3632                                        const char *buf, size_t count)
3633 {
3634         struct Scsi_Host *shost = class_to_shost(dev);
3635         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3636         unsigned long lock_flags;
3637         int result = count, i;
3638
3639         if (!capable(CAP_SYS_ADMIN))
3640                 return -EACCES;
3641
3642         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3643         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3644             !strncmp(buf, "online", 6)) {
3645                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3646                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3647                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3648                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3649                 }
3650                 wmb();
3651                 ioa_cfg->reset_retries = 0;
3652                 ioa_cfg->in_ioa_bringdown = 0;
3653                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3654         }
3655         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3656         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3657
3658         return result;
3659 }
3660
3661 static struct device_attribute ipr_ioa_state_attr = {
3662         .attr = {
3663                 .name =         "online_state",
3664                 .mode =         S_IRUGO | S_IWUSR,
3665         },
3666         .show = ipr_show_adapter_state,
3667         .store = ipr_store_adapter_state
3668 };
3669
3670 /**
3671  * ipr_store_reset_adapter - Reset the adapter
3672  * @dev:        device struct
3673  * @buf:        buffer
3674  * @count:      buffer size
3675  *
3676  * This function will reset the adapter.
3677  *
3678  * Return value:
3679  *      count on success / other on failure
3680  **/
3681 static ssize_t ipr_store_reset_adapter(struct device *dev,
3682                                        struct device_attribute *attr,
3683                                        const char *buf, size_t count)
3684 {
3685         struct Scsi_Host *shost = class_to_shost(dev);
3686         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3687         unsigned long lock_flags;
3688         int result = count;
3689
3690         if (!capable(CAP_SYS_ADMIN))
3691                 return -EACCES;
3692
3693         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3694         if (!ioa_cfg->in_reset_reload)
3695                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3696         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3697         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3698
3699         return result;
3700 }
3701
3702 static struct device_attribute ipr_ioa_reset_attr = {
3703         .attr = {
3704                 .name =         "reset_host",
3705                 .mode =         S_IWUSR,
3706         },
3707         .store = ipr_store_reset_adapter
3708 };
3709
3710 static int ipr_iopoll(struct irq_poll *iop, int budget);
3711 /**
3712  * ipr_show_iopoll_weight - Show ipr polling mode
3713  * @dev:        class device struct
3714  * @buf:        buffer
3715  *
3716  * Return value:
3717  *      number of bytes printed to buffer
3718  **/
3719 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3720                                    struct device_attribute *attr, char *buf)
3721 {
3722         struct Scsi_Host *shost = class_to_shost(dev);
3723         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3724         unsigned long lock_flags = 0;
3725         int len;
3726
3727         spin_lock_irqsave(shost->host_lock, lock_flags);
3728         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3729         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3730
3731         return len;
3732 }
3733
3734 /**
3735  * ipr_store_iopoll_weight - Change the adapter's polling mode
3736  * @dev:        class device struct
3737  * @buf:        buffer
3738  *
3739  * Return value:
3740  *      number of bytes consumed on success / -EINVAL on failure
3741  **/
3742 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3743                                         struct device_attribute *attr,
3744                                         const char *buf, size_t count)
3745 {
3746         struct Scsi_Host *shost = class_to_shost(dev);
3747         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3748         unsigned long user_iopoll_weight;
3749         unsigned long lock_flags = 0;
3750         int i;
3751
3752         if (!ioa_cfg->sis64) {
3753                 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3754                 return -EINVAL;
3755         }
3756         if (kstrtoul(buf, 10, &user_iopoll_weight))
3757                 return -EINVAL;
3758
3759         if (user_iopoll_weight > 256) {
3760                 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3761                 return -EINVAL;
3762         }
3763
3764         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3765                 dev_info(&ioa_cfg->pdev->dev, "irq_poll weight is already set to that value\n");
3766                 return strlen(buf);
3767         }
3768
3769         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3770                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3771                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3772         }
3773
3774         spin_lock_irqsave(shost->host_lock, lock_flags);
3775         ioa_cfg->iopoll_weight = user_iopoll_weight;
3776         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3777                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3778                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3779                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3780                 }
3781         }
3782         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3783
3784         return strlen(buf);
3785 }
3786
3787 static struct device_attribute ipr_iopoll_weight_attr = {
3788         .attr = {
3789                 .name =         "iopoll_weight",
3790                 .mode =         S_IRUGO | S_IWUSR,
3791         },
3792         .show = ipr_show_iopoll_weight,
3793         .store = ipr_store_iopoll_weight
3794 };
3795
3796 /**
3797  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3798  * @buf_len:            buffer length
3799  *
3800  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3801  * list to use for microcode download
3802  *
3803  * Return value:
3804  *      pointer to sglist / NULL on failure
3805  **/
3806 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3807 {
3808         int sg_size, order, bsize_elem, num_elem, i, j;
3809         struct ipr_sglist *sglist;
3810         struct scatterlist *scatterlist;
3811         struct page *page;
3812
3813         /* Get the minimum size per scatter/gather element */
3814         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3815
3816         /* Get the actual size per element */
3817         order = get_order(sg_size);
3818
3819         /* Determine the actual number of bytes per element */
3820         bsize_elem = PAGE_SIZE * (1 << order);
3821
3822         /* Determine the actual number of sg entries needed */
3823         if (buf_len % bsize_elem)
3824                 num_elem = (buf_len / bsize_elem) + 1;
3825         else
3826                 num_elem = buf_len / bsize_elem;
3827
3828         /* Allocate a scatter/gather list for the DMA */
3829         sglist = kzalloc(sizeof(struct ipr_sglist) +
3830                          (sizeof(struct scatterlist) * (num_elem - 1)),
3831                          GFP_KERNEL);
3832
3833         if (sglist == NULL) {
3834                 ipr_trace;
3835                 return NULL;
3836         }
3837
3838         scatterlist = sglist->scatterlist;
3839         sg_init_table(scatterlist, num_elem);
3840
3841         sglist->order = order;
3842         sglist->num_sg = num_elem;
3843
3844         /* Allocate a bunch of sg elements */
3845         for (i = 0; i < num_elem; i++) {
3846                 page = alloc_pages(GFP_KERNEL, order);
3847                 if (!page) {
3848                         ipr_trace;
3849
3850                         /* Free up what we already allocated */
3851                         for (j = i - 1; j >= 0; j--)
3852                                 __free_pages(sg_page(&scatterlist[j]), order);
3853                         kfree(sglist);
3854                         return NULL;
3855                 }
3856
3857                 sg_set_page(&scatterlist[i], page, 0, 0);
3858         }
3859
3860         return sglist;
3861 }
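/*
 * Editor's note: worked sizing example for the allocator above,
 * assuming 4 KB pages and IPR_MAX_SGLIST == 64 (illustrative).  For a
 * 1 MB image: sg_size = 1048576 / 63 = 16644, get_order(16644) = 3,
 * so bsize_elem = 32 KB and num_elem = 1048576 / 32768 = 32 elements,
 * each backed by an order-3 page allocation.
 */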
3862
3863 /**
3864  * ipr_free_ucode_buffer - Frees a microcode download buffer
3865  * @sglist:             scatter/gather list pointer
3866  *
3867  * Free a DMA'able ucode download buffer previously allocated with
3868  * ipr_alloc_ucode_buffer
3869  *
3870  * Return value:
3871  *      nothing
3872  **/
3873 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3874 {
3875         int i;
3876
3877         for (i = 0; i < sglist->num_sg; i++)
3878                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3879
3880         kfree(sglist);
3881 }
3882
3883 /**
3884  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3885  * @sglist:             scatter/gather list pointer
3886  * @buffer:             buffer pointer
3887  * @len:                buffer length
3888  *
3889  * Copy a microcode image from a user buffer into a buffer allocated by
3890  * ipr_alloc_ucode_buffer
3891  *
3892  * Return value:
3893  *      0 on success / other on failure
3894  **/
3895 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3896                                  u8 *buffer, u32 len)
3897 {
3898         int bsize_elem, i, result = 0;
3899         struct scatterlist *scatterlist;
3900         void *kaddr;
3901
3902         /* Determine the actual number of bytes per element */
3903         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3904
3905         scatterlist = sglist->scatterlist;
3906
3907         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3908                 struct page *page = sg_page(&scatterlist[i]);
3909
3910                 kaddr = kmap(page);
3911                 memcpy(kaddr, buffer, bsize_elem);
3912                 kunmap(page);
3913
3914                 scatterlist[i].length = bsize_elem;
3915
3916                 if (result != 0) {
3917                         ipr_trace;
3918                         return result;
3919                 }
3920         }
3921
3922         if (len % bsize_elem) {
3923                 struct page *page = sg_page(&scatterlist[i]);
3924
3925                 kaddr = kmap(page);
3926                 memcpy(kaddr, buffer, len % bsize_elem);
3927                 kunmap(page);
3928
3929                 scatterlist[i].length = len % bsize_elem;
3930         }
3931
3932         sglist->buffer_len = len;
3933         return result;
3934 }
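/*
 * Editor's note: in ipr_copy_ucode_buffer() above, 'result' is
 * initialized to zero and never modified, so the mid-loop error check
 * is effectively dead code -- kmap()/memcpy() cannot fail here.  It
 * reads like a leftover from an earlier copy-with-error-return
 * version and is retained unchanged.
 */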
3935
3936 /**
3937  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3938  * @ipr_cmd:            ipr command struct
3939  * @sglist:             scatter/gather list
3940  *
3941  * Builds a microcode download IOA data list (IOADL).
3942  *
3943  **/
3944 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3945                                     struct ipr_sglist *sglist)
3946 {
3947         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3948         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3949         struct scatterlist *scatterlist = sglist->scatterlist;
3950         int i;
3951
3952         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3953         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3954         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3955
3956         ioarcb->ioadl_len =
3957                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3958         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3959                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3960                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3961                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3962         }
3963
3964         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3965 }
3966
3967 /**
3968  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3969  * @ipr_cmd:    ipr command struct
3970  * @sglist:             scatter/gather list
3971  *
3972  * Builds a microcode download IOA data list (IOADL).
3973  *
3974  **/
3975 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3976                                   struct ipr_sglist *sglist)
3977 {
3978         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3979         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3980         struct scatterlist *scatterlist = sglist->scatterlist;
3981         int i;
3982
3983         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3984         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3985         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3986
3987         ioarcb->ioadl_len =
3988                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3989
3990         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3991                 ioadl[i].flags_and_data_len =
3992                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3993                 ioadl[i].address =
3994                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3995         }
3996
3997         ioadl[i-1].flags_and_data_len |=
3998                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3999 }
4000
4001 /**
4002  * ipr_update_ioa_ucode - Update IOA's microcode
4003  * @ioa_cfg:    ioa config struct
4004  * @sglist:             scatter/gather list
4005  *
4006  * Initiate an adapter reset to update the IOA's microcode
4007  *
4008  * Return value:
4009  *      0 on success / -EIO on failure
4010  **/
4011 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4012                                 struct ipr_sglist *sglist)
4013 {
4014         unsigned long lock_flags;
4015
4016         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4017         while (ioa_cfg->in_reset_reload) {
4018                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4019                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4020                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4021         }
4022
4023         if (ioa_cfg->ucode_sglist) {
4024                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4025                 dev_err(&ioa_cfg->pdev->dev,
4026                         "Microcode download already in progress\n");
4027                 return -EIO;
4028         }
4029
4030         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4031                                         sglist->scatterlist, sglist->num_sg,
4032                                         DMA_TO_DEVICE);
4033
4034         if (!sglist->num_dma_sg) {
4035                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4036                 dev_err(&ioa_cfg->pdev->dev,
4037                         "Failed to map microcode download buffer!\n");
4038                 return -EIO;
4039         }
4040
4041         ioa_cfg->ucode_sglist = sglist;
4042         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4043         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4045
4046         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4047         ioa_cfg->ucode_sglist = NULL;
4048         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4049         return 0;
4050 }
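/*
 * Editor's note: flow of the update above -- the sglist is DMA-mapped
 * here and published via ioa_cfg->ucode_sglist; the adapter reset job
 * then performs the actual download, and the reset path (not shown in
 * this section) is responsible for unmapping the buffer before
 * ucode_sglist is cleared.
 */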
4051
4052 /**
4053  * ipr_store_update_fw - Update the firmware on the adapter
4054  * @dev:        device struct
4055  * @buf:        buffer
4056  * @count:      buffer size
4057  *
4058  * This function will update the firmware on the adapter.
4059  *
4060  * Return value:
4061  *      count on success / other on failure
4062  **/
4063 static ssize_t ipr_store_update_fw(struct device *dev,
4064                                    struct device_attribute *attr,
4065                                    const char *buf, size_t count)
4066 {
4067         struct Scsi_Host *shost = class_to_shost(dev);
4068         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4069         struct ipr_ucode_image_header *image_hdr;
4070         const struct firmware *fw_entry;
4071         struct ipr_sglist *sglist;
4072         char fname[100];
4073         u8 *src;
4074         char *endline;
4075         int result, dnld_size;
4076
4077         if (!capable(CAP_SYS_ADMIN))
4078                 return -EACCES;
4079
4080         snprintf(fname, sizeof(fname), "%s", buf);
4081
4082         endline = strchr(fname, '\n');
4083         if (endline)
4084                 *endline = '\0';
4085
4086         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4087                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4088                 return -EIO;
4089         }
4090
4091         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4092
4093         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4094         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4095         sglist = ipr_alloc_ucode_buffer(dnld_size);
4096
4097         if (!sglist) {
4098                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4099                 release_firmware(fw_entry);
4100                 return -ENOMEM;
4101         }
4102
4103         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4104
4105         if (result) {
4106                 dev_err(&ioa_cfg->pdev->dev,
4107                         "Microcode buffer copy to DMA buffer failed\n");
4108                 goto out;
4109         }
4110
4111         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4112
4113         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4114
4115         if (!result)
4116                 result = count;
4117 out:
4118         ipr_free_ucode_buffer(sglist);
4119         release_firmware(fw_entry);
4120         return result;
4121 }
4122
4123 static struct device_attribute ipr_update_fw_attr = {
4124         .attr = {
4125                 .name =         "update_fw",
4126                 .mode =         S_IWUSR,
4127         },
4128         .store = ipr_store_update_fw
4129 };
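/*
 * Illustrative usage from userspace (the sysfs path and host number are
 * examples, not taken from this file): write the firmware image name to
 * the update_fw attribute, e.g.
 *
 *      # echo <firmware image name> > /sys/class/scsi_host/host0/update_fw
 *
 * The name is passed to request_firmware(), so the image must be visible
 * to the firmware loader (typically under /lib/firmware).
 */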
4130
4131 /**
4132  * ipr_show_fw_type - Show the adapter's firmware type.
4133  * @dev:        class device struct
4134  * @buf:        buffer
4135  *
4136  * Return value:
4137  *      number of bytes printed to buffer
4138  **/
4139 static ssize_t ipr_show_fw_type(struct device *dev,
4140                                 struct device_attribute *attr, char *buf)
4141 {
4142         struct Scsi_Host *shost = class_to_shost(dev);
4143         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4144         unsigned long lock_flags = 0;
4145         int len;
4146
4147         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4148         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4149         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4150         return len;
4151 }
4152
4153 static struct device_attribute ipr_ioa_fw_type_attr = {
4154         .attr = {
4155                 .name =         "fw_type",
4156                 .mode =         S_IRUGO,
4157         },
4158         .show = ipr_show_fw_type
4159 };
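/*
 * Example (illustrative host number): reading fw_type reports whether the
 * adapter uses the 64-bit SIS interface, printing "1" for sis64 adapters
 * and "0" otherwise:
 *
 *      # cat /sys/class/scsi_host/host0/fw_type
 */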
4160
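/**
 * ipr_read_async_err_log - Read the oldest pending asynchronous error log entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Return value:
 *      number of bytes read / 0 if no entry is pending
 **/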
4161 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4162                                 struct bin_attribute *bin_attr, char *buf,
4163                                 loff_t off, size_t count)
4164 {
4165         struct device *cdev = container_of(kobj, struct device, kobj);
4166         struct Scsi_Host *shost = class_to_shost(cdev);
4167         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4168         struct ipr_hostrcb *hostrcb;
4169         unsigned long lock_flags = 0;
4170         int ret;
4171
4172         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4173         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4174                                         struct ipr_hostrcb, queue);
4175         if (!hostrcb) {
4176                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4177                 return 0;
4178         }
4179         ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4180                                 sizeof(hostrcb->hcam));
4181         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4182         return ret;
4183 }
4184
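/**
 * ipr_next_async_err_log - Acknowledge the oldest asynchronous error log entry
 * @filep:      open sysfs file
 * @kobj:       kobject struct
 * @bin_attr:   bin_attribute struct
 * @buf:        buffer
 * @off:        offset
 * @count:      buffer size
 *
 * Writing to this attribute moves the oldest entry from the report queue
 * back to the free queue, exposing the next entry to subsequent reads.
 *
 * Return value:
 *      count
 **/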
4185 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4186                                 struct bin_attribute *bin_attr, char *buf,
4187                                 loff_t off, size_t count)
4188 {
4189         struct device *cdev = container_of(kobj, struct device, kobj);
4190         struct Scsi_Host *shost = class_to_shost(cdev);
4191         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4192         struct ipr_hostrcb *hostrcb;
4193         unsigned long lock_flags = 0;
4194
4195         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4196         hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4197                                         struct ipr_hostrcb, queue);
4198         if (!hostrcb) {
4199                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4200                 return count;
4201         }
4202
4203         /* Reclaim hostrcb before exit */
4204         list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4205         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4206         return count;
4207 }
4208
4209 static struct bin_attribute ipr_ioa_async_err_log = {
4210         .attr = {
4211                 .name =         "async_err_log",
4212                 .mode =         S_IRUGO | S_IWUSR,
4213         },
4214         .size = 0,
4215         .read = ipr_read_async_err_log,
4216         .write = ipr_next_async_err_log
4217 };
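/*
 * Illustrative consumption of the async error log from userspace (paths
 * are examples): read the oldest entry, then write anything to the
 * attribute to acknowledge it and expose the next one:
 *
 *      # dd if=/sys/class/scsi_host/host0/async_err_log of=entry.bin
 *      # echo > /sys/class/scsi_host/host0/async_err_log
 */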
4218
4219 static struct device_attribute *ipr_ioa_attrs[] = {
4220         &ipr_fw_version_attr,
4221         &ipr_log_level_attr,
4222         &ipr_diagnostics_attr,
4223         &ipr_ioa_state_attr,
4224         &ipr_ioa_reset_attr,
4225         &ipr_update_fw_attr,
4226         &ipr_ioa_fw_type_attr,
4227         &ipr_iopoll_weight_attr,
4228         NULL,
4229 };
4230
4231 #ifdef CONFIG_SCSI_IPR_DUMP
4232 /**
4233  * ipr_read_dump - Dump the adapter
4234  * @filp:               open sysfs file
4235  * @kobj:               kobject struct
4236  * @bin_attr:           bin_attribute struct
4237  * @buf:                buffer
4238  * @off:                offset
4239  * @count:              buffer size
4240  *
4241  * Return value:
4242  *      number of bytes printed to buffer
4243  **/
4244 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4245                              struct bin_attribute *bin_attr,
4246                              char *buf, loff_t off, size_t count)
4247 {
4248         struct device *cdev = container_of(kobj, struct device, kobj);
4249         struct Scsi_Host *shost = class_to_shost(cdev);
4250         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4251         struct ipr_dump *dump;
4252         unsigned long lock_flags = 0;
4253         char *src;
4254         int len, sdt_end;
4255         size_t rc = count;
4256
4257         if (!capable(CAP_SYS_ADMIN))
4258                 return -EACCES;
4259
4260         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4261         dump = ioa_cfg->dump;
4262
4263         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4264                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4265                 return 0;
4266         }
4267         kref_get(&dump->kref);
4268         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4269
4270         if (off > dump->driver_dump.hdr.len) {
4271                 kref_put(&dump->kref, ipr_release_dump);
4272                 return 0;
4273         }
4274
4275         if (off + count > dump->driver_dump.hdr.len) {
4276                 count = dump->driver_dump.hdr.len - off;
4277                 rc = count;
4278         }
4279
4280         if (count && off < sizeof(dump->driver_dump)) {
4281                 if (off + count > sizeof(dump->driver_dump))
4282                         len = sizeof(dump->driver_dump) - off;
4283                 else
4284                         len = count;
4285                 src = (u8 *)&dump->driver_dump + off;
4286                 memcpy(buf, src, len);
4287                 buf += len;
4288                 off += len;
4289                 count -= len;
4290         }
4291
4292         off -= sizeof(dump->driver_dump);
4293
4294         if (ioa_cfg->sis64)
4295                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4296                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4297                            sizeof(struct ipr_sdt_entry));
4298         else
4299                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4300                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4301
4302         if (count && off < sdt_end) {
4303                 if (off + count > sdt_end)
4304                         len = sdt_end - off;
4305                 else
4306                         len = count;
4307                 src = (u8 *)&dump->ioa_dump + off;
4308                 memcpy(buf, src, len);
4309                 buf += len;
4310                 off += len;
4311                 count -= len;
4312         }
4313
4314         off -= sdt_end;
4315
4316         while (count) {
4317                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4318                         len = PAGE_ALIGN(off) - off;
4319                 else
4320                         len = count;
4321                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4322                 src += off & ~PAGE_MASK;
4323                 memcpy(buf, src, len);
4324                 buf += len;
4325                 off += len;
4326                 count -= len;
4327         }
4328
4329         kref_put(&dump->kref, ipr_release_dump);
4330         return rc;
4331 }
4332
4333 /**
4334  * ipr_alloc_dump - Prepare for adapter dump
4335  * @ioa_cfg:    ioa config struct
4336  *
4337  * Return value:
4338  *      0 on success / other on failure
4339  **/
4340 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4341 {
4342         struct ipr_dump *dump;
4343         __be32 **ioa_data;
4344         unsigned long lock_flags = 0;
4345
4346         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4347
4348         if (!dump) {
4349                 ipr_err("Dump memory allocation failed\n");
4350                 return -ENOMEM;
4351         }
4352
4353         if (ioa_cfg->sis64)
4354                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4355         else
4356                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4357
4358         if (!ioa_data) {
4359                 ipr_err("Dump memory allocation failed\n");
4360                 kfree(dump);
4361                 return -ENOMEM;
4362         }
4363
4364         dump->ioa_dump.ioa_data = ioa_data;
4365
4366         kref_init(&dump->kref);
4367         dump->ioa_cfg = ioa_cfg;
4368
4369         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4370
4371         if (INACTIVE != ioa_cfg->sdt_state) {
4372                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4373                 vfree(dump->ioa_dump.ioa_data);
4374                 kfree(dump);
4375                 return 0;
4376         }
4377
4378         ioa_cfg->dump = dump;
4379         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4380         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4381                 ioa_cfg->dump_taken = 1;
4382                 schedule_work(&ioa_cfg->work_q);
4383         }
4384         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4385
4386         return 0;
4387 }
4388
4389 /**
4390  * ipr_free_dump - Free adapter dump memory
4391  * @ioa_cfg:    ioa config struct
4392  *
4393  * Return value:
4394  *      0 on success / other on failure
4395  **/
4396 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4397 {
4398         struct ipr_dump *dump;
4399         unsigned long lock_flags = 0;
4400
4401         ENTER;
4402
4403         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4404         dump = ioa_cfg->dump;
4405         if (!dump) {
4406                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4407                 return 0;
4408         }
4409
4410         ioa_cfg->dump = NULL;
4411         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4412
4413         kref_put(&dump->kref, ipr_release_dump);
4414
4415         LEAVE;
4416         return 0;
4417 }
4418
4419 /**
4420  * ipr_write_dump - Setup dump state of adapter
4421  * @filp:               open sysfs file
4422  * @kobj:               kobject struct
4423  * @bin_attr:           bin_attribute struct
4424  * @buf:                buffer
4425  * @off:                offset
4426  * @count:              buffer size
4427  *
4428  * Return value:
4429  *      count on success / negative errno on failure
4430  **/
4431 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4432                               struct bin_attribute *bin_attr,
4433                               char *buf, loff_t off, size_t count)
4434 {
4435         struct device *cdev = container_of(kobj, struct device, kobj);
4436         struct Scsi_Host *shost = class_to_shost(cdev);
4437         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4438         int rc;
4439
4440         if (!capable(CAP_SYS_ADMIN))
4441                 return -EACCES;
4442
4443         if (buf[0] == '1')
4444                 rc = ipr_alloc_dump(ioa_cfg);
4445         else if (buf[0] == '0')
4446                 rc = ipr_free_dump(ioa_cfg);
4447         else
4448                 return -EINVAL;
4449
4450         if (rc)
4451                 return rc;
4452         else
4453                 return count;
4454 }
4455
4456 static struct bin_attribute ipr_dump_attr = {
4457         .attr = {
4458                 .name = "dump",
4459                 .mode = S_IRUSR | S_IWUSR,
4460         },
4461         .size = 0,
4462         .read = ipr_read_dump,
4463         .write = ipr_write_dump
4464 };
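/*
 * Illustrative dump lifecycle from userspace (host number is an example):
 * writing '1' prepares dump memory, the dump is then read back as binary
 * data, and writing '0' frees it:
 *
 *      # echo 1 > /sys/class/scsi_host/host0/dump
 *      # dd if=/sys/class/scsi_host/host0/dump of=ioa.dump
 *      # echo 0 > /sys/class/scsi_host/host0/dump
 */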
4465 #else
4466 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4467 #endif
4468
4469 /**
4470  * ipr_change_queue_depth - Change the device's queue depth
4471  * @sdev:       scsi device struct
4472  * @qdepth:     depth to set
4474  *
4475  * Return value:
4476  *      actual depth set
4477  **/
4478 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4479 {
4480         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4481         struct ipr_resource_entry *res;
4482         unsigned long lock_flags = 0;
4483
4484         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4485         res = (struct ipr_resource_entry *)sdev->hostdata;
4486
4487         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4488                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4489         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4490
4491         scsi_change_queue_depth(sdev, qdepth);
4492         return sdev->queue_depth;
4493 }
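/*
 * Note: this is reached through the midlayer's ->change_queue_depth hook,
 * e.g. when userspace writes the standard sysfs attribute (illustrative
 * device name):
 *
 *      # echo 16 > /sys/block/sda/device/queue_depth
 */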
4494
4495 /**
4496  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4497  * @dev:        device struct
4498  * @attr:       device attribute structure
4499  * @buf:        buffer
4500  *
4501  * Return value:
4502  *      number of bytes printed to buffer
4503  **/
4504 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4505 {
4506         struct scsi_device *sdev = to_scsi_device(dev);
4507         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4508         struct ipr_resource_entry *res;
4509         unsigned long lock_flags = 0;
4510         ssize_t len = -ENXIO;
4511
4512         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4513         res = (struct ipr_resource_entry *)sdev->hostdata;
4514         if (res)
4515                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4516         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4517         return len;
4518 }
4519
4520 static struct device_attribute ipr_adapter_handle_attr = {
4521         .attr = {
4522                 .name =         "adapter_handle",
4523                 .mode =         S_IRUSR,
4524         },
4525         .show = ipr_show_adapter_handle
4526 };
4527
4528 /**
4529  * ipr_show_resource_path - Show the resource path or the resource address for
4530  *                          this device.
4531  * @dev:        device struct
4532  * @attr:       device attribute structure
4533  * @buf:        buffer
4534  *
4535  * Return value:
4536  *      number of bytes printed to buffer
4537  **/
4538 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4539 {
4540         struct scsi_device *sdev = to_scsi_device(dev);
4541         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4542         struct ipr_resource_entry *res;
4543         unsigned long lock_flags = 0;
4544         ssize_t len = -ENXIO;
4545         char buffer[IPR_MAX_RES_PATH_LENGTH];
4546
4547         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4548         res = (struct ipr_resource_entry *)sdev->hostdata;
4549         if (res && ioa_cfg->sis64)
4550                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4551                                __ipr_format_res_path(res->res_path, buffer,
4552                                                      sizeof(buffer)));
4553         else if (res)
4554                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4555                                res->bus, res->target, res->lun);
4556
4557         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4558         return len;
4559 }
4560
4561 static struct device_attribute ipr_resource_path_attr = {
4562         .attr = {
4563                 .name =         "resource_path",
4564                 .mode =         S_IRUGO,
4565         },
4566         .show = ipr_show_resource_path
4567 };
4568
4569 /**
4570  * ipr_show_device_id - Show the device_id for this device.
4571  * @dev:        device struct
4572  * @attr:       device attribute structure
4573  * @buf:        buffer
4574  *
4575  * Return value:
4576  *      number of bytes printed to buffer
4577  **/
4578 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4579 {
4580         struct scsi_device *sdev = to_scsi_device(dev);
4581         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4582         struct ipr_resource_entry *res;
4583         unsigned long lock_flags = 0;
4584         ssize_t len = -ENXIO;
4585
4586         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587         res = (struct ipr_resource_entry *)sdev->hostdata;
4588         if (res && ioa_cfg->sis64)
4589                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4590         else if (res)
4591                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4592
4593         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4594         return len;
4595 }
4596
4597 static struct device_attribute ipr_device_id_attr = {
4598         .attr = {
4599                 .name =         "device_id",
4600                 .mode =         S_IRUGO,
4601         },
4602         .show = ipr_show_device_id
4603 };
4604
4605 /**
4606  * ipr_show_resource_type - Show the resource type for this device.
4607  * @dev:        device struct
4608  * @attr:       device attribute structure
4609  * @buf:        buffer
4610  *
4611  * Return value:
4612  *      number of bytes printed to buffer
4613  **/
4614 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4615 {
4616         struct scsi_device *sdev = to_scsi_device(dev);
4617         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4618         struct ipr_resource_entry *res;
4619         unsigned long lock_flags = 0;
4620         ssize_t len = -ENXIO;
4621
4622         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4623         res = (struct ipr_resource_entry *)sdev->hostdata;
4624
4625         if (res)
4626                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4627
4628         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4629         return len;
4630 }
4631
4632 static struct device_attribute ipr_resource_type_attr = {
4633         .attr = {
4634                 .name =         "resource_type",
4635                 .mode =         S_IRUGO,
4636         },
4637         .show = ipr_show_resource_type
4638 };
4639
4640 /**
4641  * ipr_show_raw_mode - Show the device's raw mode
4642  * @dev:        class device struct
4643  * @buf:        buffer
4644  *
4645  * Return value:
4646  *      number of bytes printed to buffer
4647  **/
4648 static ssize_t ipr_show_raw_mode(struct device *dev,
4649                                  struct device_attribute *attr, char *buf)
4650 {
4651         struct scsi_device *sdev = to_scsi_device(dev);
4652         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4653         struct ipr_resource_entry *res;
4654         unsigned long lock_flags = 0;
4655         ssize_t len;
4656
4657         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4658         res = (struct ipr_resource_entry *)sdev->hostdata;
4659         if (res)
4660                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4661         else
4662                 len = -ENXIO;
4663         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4664         return len;
4665 }
4666
4667 /**
4668  * ipr_store_raw_mode - Change the device's raw mode
4669  * @dev:        class device struct
4670  * @buf:        buffer
4671  *
4672  * Return value:
4673  *      number of bytes consumed on success / negative errno on failure
4674  **/
4675 static ssize_t ipr_store_raw_mode(struct device *dev,
4676                                   struct device_attribute *attr,
4677                                   const char *buf, size_t count)
4678 {
4679         struct scsi_device *sdev = to_scsi_device(dev);
4680         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4681         struct ipr_resource_entry *res;
4682         unsigned long lock_flags = 0;
4683         ssize_t len;
4684
4685         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686         res = (struct ipr_resource_entry *)sdev->hostdata;
4687         if (res) {
4688                 if (ipr_is_af_dasd_device(res)) {
4689                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4690                         len = strlen(buf);
4691                         if (res->sdev)
4692                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4693                                         res->raw_mode ? "enabled" : "disabled");
4694                 } else
4695                         len = -EINVAL;
4696         } else
4697                 len = -ENXIO;
4698         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4699         return len;
4700 }
4701
4702 static struct device_attribute ipr_raw_mode_attr = {
4703         .attr = {
4704                 .name =         "raw_mode",
4705                 .mode =         S_IRUGO | S_IWUSR,
4706         },
4707         .show = ipr_show_raw_mode,
4708         .store = ipr_store_raw_mode
4709 };
4710
4711 static struct device_attribute *ipr_dev_attrs[] = {
4712         &ipr_adapter_handle_attr,
4713         &ipr_resource_path_attr,
4714         &ipr_device_id_attr,
4715         &ipr_resource_type_attr,
4716         &ipr_raw_mode_attr,
4717         NULL,
4718 };
4719
4720 /**
4721  * ipr_biosparam - Return the HSC mapping
4722  * @sdev:                       scsi device struct
4723  * @block_device:       block device pointer
4724  * @capacity:           capacity of the device
4725  * @parm:                       Array containing returned HSC values.
4726  *
4727  * This function generates the HSC parms that fdisk uses.
4728  * We want to make sure we return something that places partitions
4729  * on 4k boundaries for best performance with the IOA.
4730  *
4731  * Return value:
4732  *      0 on success
4733  **/
4734 static int ipr_biosparam(struct scsi_device *sdev,
4735                          struct block_device *block_device,
4736                          sector_t capacity, int *parm)
4737 {
4738         int heads, sectors;
4739         sector_t cylinders;
4740
4741         heads = 128;
4742         sectors = 32;
4743
4744         cylinders = capacity;
4745         sector_div(cylinders, (128 * 32));
4746
4747         /* return result */
4748         parm[0] = heads;
4749         parm[1] = sectors;
4750         parm[2] = cylinders;
4751
4752         return 0;
4753 }
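/*
 * Why 128 heads and 32 sectors yield 4k alignment: one cylinder is
 * 128 * 32 = 4096 sectors of 512 bytes (2 MiB), so any partition that
 * starts on a cylinder boundary is aligned to a multiple of 4 KiB.
 */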
4754
4755 /**
4756  * ipr_find_starget - Find target based on bus/target.
4757  * @starget:    scsi target struct
4758  *
4759  * Return value:
4760  *      resource entry pointer if found / NULL if not found
4761  **/
4762 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4763 {
4764         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4765         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4766         struct ipr_resource_entry *res;
4767
4768         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4769                 if ((res->bus == starget->channel) &&
4770                     (res->target == starget->id)) {
4771                         return res;
4772                 }
4773         }
4774
4775         return NULL;
4776 }
4777
4778 static struct ata_port_info sata_port_info;
4779
4780 /**
4781  * ipr_target_alloc - Prepare for commands to a SCSI target
4782  * @starget:    scsi target struct
4783  *
4784  * If the device is a SATA device, this function allocates an
4785  * ATA port with libata, else it does nothing.
4786  *
4787  * Return value:
4788  *      0 on success / non-0 on failure
4789  **/
4790 static int ipr_target_alloc(struct scsi_target *starget)
4791 {
4792         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4793         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4794         struct ipr_sata_port *sata_port;
4795         struct ata_port *ap;
4796         struct ipr_resource_entry *res;
4797         unsigned long lock_flags;
4798
4799         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4800         res = ipr_find_starget(starget);
4801         starget->hostdata = NULL;
4802
4803         if (res && ipr_is_gata(res)) {
4804                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4806                 if (!sata_port)
4807                         return -ENOMEM;
4808
4809                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4810                 if (ap) {
4811                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4812                         sata_port->ioa_cfg = ioa_cfg;
4813                         sata_port->ap = ap;
4814                         sata_port->res = res;
4815
4816                         res->sata_port = sata_port;
4817                         ap->private_data = sata_port;
4818                         starget->hostdata = sata_port;
4819                 } else {
4820                         kfree(sata_port);
4821                         return -ENOMEM;
4822                 }
4823         }
4824         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4825
4826         return 0;
4827 }
4828
4829 /**
4830  * ipr_target_destroy - Destroy a SCSI target
4831  * @starget:    scsi target struct
4832  *
4833  * If the device was a SATA device, this function frees the libata
4834  * ATA port, else it does nothing.
4835  *
4836  **/
4837 static void ipr_target_destroy(struct scsi_target *starget)
4838 {
4839         struct ipr_sata_port *sata_port = starget->hostdata;
4840         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4841         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4842
4843         if (ioa_cfg->sis64) {
4844                 if (!ipr_find_starget(starget)) {
4845                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4846                                 clear_bit(starget->id, ioa_cfg->array_ids);
4847                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4848                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4849                         else if (starget->channel == 0)
4850                                 clear_bit(starget->id, ioa_cfg->target_ids);
4851                 }
4852         }
4853
4854         if (sata_port) {
4855                 starget->hostdata = NULL;
4856                 ata_sas_port_destroy(sata_port->ap);
4857                 kfree(sata_port);
4858         }
4859 }
4860
4861 /**
4862  * ipr_find_sdev - Find device based on bus/target/lun.
4863  * @sdev:       scsi device struct
4864  *
4865  * Return value:
4866  *      resource entry pointer if found / NULL if not found
4867  **/
4868 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4869 {
4870         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4871         struct ipr_resource_entry *res;
4872
4873         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4874                 if ((res->bus == sdev->channel) &&
4875                     (res->target == sdev->id) &&
4876                     (res->lun == sdev->lun))
4877                         return res;
4878         }
4879
4880         return NULL;
4881 }
4882
4883 /**
4884  * ipr_slave_destroy - Unconfigure a SCSI device
4885  * @sdev:       scsi device struct
4886  *
4887  * Return value:
4888  *      nothing
4889  **/
4890 static void ipr_slave_destroy(struct scsi_device *sdev)
4891 {
4892         struct ipr_resource_entry *res;
4893         struct ipr_ioa_cfg *ioa_cfg;
4894         unsigned long lock_flags = 0;
4895
4896         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4897
4898         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4899         res = (struct ipr_resource_entry *) sdev->hostdata;
4900         if (res) {
4901                 if (res->sata_port)
4902                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4903                 sdev->hostdata = NULL;
4904                 res->sdev = NULL;
4905                 res->sata_port = NULL;
4906         }
4907         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4908 }
4909
4910 /**
4911  * ipr_slave_configure - Configure a SCSI device
4912  * @sdev:       scsi device struct
4913  *
4914  * This function configures the specified scsi device.
4915  *
4916  * Return value:
4917  *      0 on success
4918  **/
4919 static int ipr_slave_configure(struct scsi_device *sdev)
4920 {
4921         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4922         struct ipr_resource_entry *res;
4923         struct ata_port *ap = NULL;
4924         unsigned long lock_flags = 0;
4925         char buffer[IPR_MAX_RES_PATH_LENGTH];
4926
4927         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4928         res = sdev->hostdata;
4929         if (res) {
4930                 if (ipr_is_af_dasd_device(res))
4931                         sdev->type = TYPE_RAID;
4932                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4933                         sdev->scsi_level = 4;
4934                         sdev->no_uld_attach = 1;
4935                 }
4936                 if (ipr_is_vset_device(res)) {
4937                         sdev->scsi_level = SCSI_SPC_3;
4938                         blk_queue_rq_timeout(sdev->request_queue,
4939                                              IPR_VSET_RW_TIMEOUT);
4940                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4941                 }
4942                 if (ipr_is_gata(res) && res->sata_port)
4943                         ap = res->sata_port->ap;
4944                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4945
4946                 if (ap) {
4947                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4948                         ata_sas_slave_configure(sdev, ap);
4949                 }
4950
4951                 if (ioa_cfg->sis64)
4952                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4953                                     ipr_format_res_path(ioa_cfg,
4954                                 res->res_path, buffer, sizeof(buffer)));
4955                 return 0;
4956         }
4957         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4958         return 0;
4959 }
4960
4961 /**
4962  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4963  * @sdev:       scsi device struct
4964  *
4965  * This function initializes an ATA port so that future commands
4966  * sent through queuecommand will work.
4967  *
4968  * Return value:
4969  *      0 on success
4970  **/
4971 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4972 {
4973         struct ipr_sata_port *sata_port = NULL;
4974         int rc = -ENXIO;
4975
4976         ENTER;
4977         if (sdev->sdev_target)
4978                 sata_port = sdev->sdev_target->hostdata;
4979         if (sata_port) {
4980                 rc = ata_sas_port_init(sata_port->ap);
4981                 if (rc == 0)
4982                         rc = ata_sas_sync_probe(sata_port->ap);
4983         }
4984
4985         if (rc)
4986                 ipr_slave_destroy(sdev);
4987
4988         LEAVE;
4989         return rc;
4990 }
4991
4992 /**
4993  * ipr_slave_alloc - Prepare for commands to a device.
4994  * @sdev:       scsi device struct
4995  *
4996  * This function saves a pointer to the resource entry
4997  * in the scsi device struct if the device exists. We
4998  * can then use this pointer in ipr_queuecommand when
4999  * handling new commands.
5000  *
5001  * Return value:
5002  *      0 on success / -ENXIO if device does not exist
5003  **/
5004 static int ipr_slave_alloc(struct scsi_device *sdev)
5005 {
5006         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5007         struct ipr_resource_entry *res;
5008         unsigned long lock_flags;
5009         int rc = -ENXIO;
5010
5011         sdev->hostdata = NULL;
5012
5013         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5014
5015         res = ipr_find_sdev(sdev);
5016         if (res) {
5017                 res->sdev = sdev;
5018                 res->add_to_ml = 0;
5019                 res->in_erp = 0;
5020                 sdev->hostdata = res;
5021                 if (!ipr_is_naca_model(res))
5022                         res->needs_sync_complete = 1;
5023                 rc = 0;
5024                 if (ipr_is_gata(res)) {
5025                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5026                         return ipr_ata_slave_alloc(sdev);
5027                 }
5028         }
5029
5030         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5031
5032         return rc;
5033 }
5034
5035 /**
5036  * ipr_match_lun - Match function for specified LUN
5037  * @ipr_cmd:    ipr command struct
5038  * @device:             device to match (sdev)
5039  *
5040  * Returns:
5041  *      1 if command matches sdev / 0 if command does not match sdev
5042  **/
5043 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5044 {
5045         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5046                 return 1;
5047         return 0;
5048 }
5049
5050 /**
5051  * ipr_cmnd_is_free - Check if a command is free or not
5052  * @ipr_cmd:    ipr command struct
5053  *
5054  * Returns:
5055  *      true / false
5056  **/
5057 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5058 {
5059         struct ipr_cmnd *loop_cmd;
5060
5061         list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5062                 if (loop_cmd == ipr_cmd)
5063                         return true;
5064         }
5065
5066         return false;
5067 }
5068
5069 /**
5070  * ipr_match_res - Match function for specified resource entry
5071  * @ipr_cmd:    ipr command struct
5072  * @resource:   resource entry to match
5073  *
5074  * Returns:
5075  *      1 if command matches the resource entry / 0 if it does not
5076  **/
5077 static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5078 {
5079         struct ipr_resource_entry *res = resource;
5080
5081         if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5082                 return 1;
5083         return 0;
5084 }
5085
5086 /**
5087  * ipr_wait_for_ops - Wait for matching commands to complete
5088  * @ioa_cfg:    ioa config struct
5089  * @device:             device to match (sdev)
5090  * @match:              match function to use
5091  *
5092  * Returns:
5093  *      SUCCESS / FAILED
5094  **/
5095 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5096                             int (*match)(struct ipr_cmnd *, void *))
5097 {
5098         struct ipr_cmnd *ipr_cmd;
5099         int wait, i;
5100         unsigned long flags;
5101         struct ipr_hrr_queue *hrrq;
5102         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5103         DECLARE_COMPLETION_ONSTACK(comp);
5104
5105         ENTER;
5106         do {
5107                 wait = 0;
5108
5109                 for_each_hrrq(hrrq, ioa_cfg) {
5110                         spin_lock_irqsave(hrrq->lock, flags);
5111                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5112                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5113                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5114                                         if (match(ipr_cmd, device)) {
5115                                                 ipr_cmd->eh_comp = &comp;
5116                                                 wait++;
5117                                         }
5118                                 }
5119                         }
5120                         spin_unlock_irqrestore(hrrq->lock, flags);
5121                 }
5122
5123                 if (wait) {
5124                         timeout = wait_for_completion_timeout(&comp, timeout);
5125
5126                         if (!timeout) {
5127                                 wait = 0;
5128
5129                                 for_each_hrrq(hrrq, ioa_cfg) {
5130                                         spin_lock_irqsave(hrrq->lock, flags);
5131                                         for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5132                                                 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5133                                                 if (!ipr_cmnd_is_free(ipr_cmd)) {
5134                                                         if (match(ipr_cmd, device)) {
5135                                                                 ipr_cmd->eh_comp = NULL;
5136                                                                 wait++;
5137                                                         }
5138                                                 }
5139                                         }
5140                                         spin_unlock_irqrestore(hrrq->lock, flags);
5141                                 }
5142
5143                                 if (wait)
5144                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5145                                 LEAVE;
5146                                 return wait ? FAILED : SUCCESS;
5147                         }
5148                 }
5149         } while (wait);
5150
5151         LEAVE;
5152         return SUCCESS;
5153 }
5154
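/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/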
5155 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
5156 {
5157         struct ipr_ioa_cfg *ioa_cfg;
5158         unsigned long lock_flags = 0;
5159         int rc = SUCCESS;
5160
5161         ENTER;
5162         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5163         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5164
5165         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5166                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5167                 dev_err(&ioa_cfg->pdev->dev,
5168                         "Adapter being reset as a result of error recovery.\n");
5169
5170                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5171                         ioa_cfg->sdt_state = GET_DUMP;
5172         }
5173
5174         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5175         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5176         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5177
5178         /* If we got hit with a host reset while we were already resetting
5179          * the adapter for some reason, and that reset failed, report failure. */
5180         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5181                 ipr_trace;
5182                 rc = FAILED;
5183         }
5184
5185         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5186         LEAVE;
5187         return rc;
5188 }
5189
5190 /**
5191  * ipr_device_reset - Reset the device
5192  * @ioa_cfg:    ioa config struct
5193  * @res:                resource entry struct
5194  *
5195  * This function issues a device reset to the affected device.
5196  * If the device is a SCSI device, a LUN reset will be sent
5197  * to the device first. If that does not work, a target reset
5198  * will be sent. If the device is a SATA device, a PHY reset will
5199  * be sent.
5200  *
5201  * Return value:
5202  *      0 on success / non-zero on failure
5203  **/
5204 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5205                             struct ipr_resource_entry *res)
5206 {
5207         struct ipr_cmnd *ipr_cmd;
5208         struct ipr_ioarcb *ioarcb;
5209         struct ipr_cmd_pkt *cmd_pkt;
5210         struct ipr_ioarcb_ata_regs *regs;
5211         u32 ioasc;
5212
5213         ENTER;
5214         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5215         ioarcb = &ipr_cmd->ioarcb;
5216         cmd_pkt = &ioarcb->cmd_pkt;
5217
5218         if (ipr_cmd->ioa_cfg->sis64) {
5219                 regs = &ipr_cmd->i.ata_ioadl.regs;
5220                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5221         } else
5222                 regs = &ioarcb->u.add_data.u.regs;
5223
5224         ioarcb->res_handle = res->res_handle;
5225         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5226         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5227         if (ipr_is_gata(res)) {
5228                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5229                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5230                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5231         }
5232
5233         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5234         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5235         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5236         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5237                 if (ipr_cmd->ioa_cfg->sis64)
5238                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5239                                sizeof(struct ipr_ioasa_gata));
5240                 else
5241                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5242                                sizeof(struct ipr_ioasa_gata));
5243         }
5244
5245         LEAVE;
5246         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5247 }
5248
5249 /**
5250  * ipr_sata_reset - Reset the SATA port
5251  * @link:       SATA link to reset
5252  * @classes:    class of the attached device
5253  *
5254  * This function issues a SATA phy reset to the affected ATA link.
5255  *
5256  * Return value:
5257  *      0 on success / non-zero on failure
5258  **/
5259 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5260                                 unsigned long deadline)
5261 {
5262         struct ipr_sata_port *sata_port = link->ap->private_data;
5263         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5264         struct ipr_resource_entry *res;
5265         unsigned long lock_flags = 0;
5266         int rc = -ENXIO, ret;
5267
5268         ENTER;
5269         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5270         while (ioa_cfg->in_reset_reload) {
5271                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5272                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5273                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5274         }
5275
5276         res = sata_port->res;
5277         if (res) {
5278                 rc = ipr_device_reset(ioa_cfg, res);
5279                 *classes = res->ata_class;
5280                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5281
5282                 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5283                 if (ret != SUCCESS) {
5284                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5285                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5286                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5287
5288                         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5289                 }
5290         } else
5291                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5292
5293         LEAVE;
5294         return rc;
5295 }
5296
5297 /**
5298  * __ipr_eh_dev_reset - Reset the device
5299  * @scsi_cmd:   scsi command struct
5300  *
5301  * This function issues a device reset to the affected device.
5302  * A LUN reset will be sent to the device first. If that does
5303  * not work, a target reset will be sent.
5304  *
5305  * Return value:
5306  *      SUCCESS / FAILED
5307  **/
5308 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5309 {
5310         struct ipr_cmnd *ipr_cmd;
5311         struct ipr_ioa_cfg *ioa_cfg;
5312         struct ipr_resource_entry *res;
5313         struct ata_port *ap;
5314         int rc = 0, i;
5315         struct ipr_hrr_queue *hrrq;
5316
5317         ENTER;
5318         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5319         res = scsi_cmd->device->hostdata;
5320
5321         /*
5322          * If we are currently going through reset/reload, return failed. This will force the
5323          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5324          * reset to complete
5325          */
5326         if (ioa_cfg->in_reset_reload)
5327                 return FAILED;
5328         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5329                 return FAILED;
5330
5331         for_each_hrrq(hrrq, ioa_cfg) {
5332                 spin_lock(&hrrq->_lock);
5333                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5334                         ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5335
5336                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5337                                 if (!ipr_cmd->qc)
5338                                         continue;
5339                                 if (ipr_cmnd_is_free(ipr_cmd))
5340                                         continue;
5341
5342                                 ipr_cmd->done = ipr_sata_eh_done;
5343                                 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5344                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5345                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5346                                 }
5347                         }
5348                 }
5349                 spin_unlock(&hrrq->_lock);
5350         }
5351         res->resetting_device = 1;
5352         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5353
5354         if (ipr_is_gata(res) && res->sata_port) {
5355                 ap = res->sata_port->ap;
5356                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5357                 ata_std_error_handler(ap);
5358                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5359         } else
5360                 rc = ipr_device_reset(ioa_cfg, res);
5361         res->resetting_device = 0;
5362         res->reset_occurred = 1;
5363
5364         LEAVE;
5365         return rc ? FAILED : SUCCESS;
5366 }
5367
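/**
 * ipr_eh_dev_reset - Reset the device and wait for its outstanding ops
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/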
5368 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5369 {
5370         int rc;
5371         struct ipr_ioa_cfg *ioa_cfg;
5372         struct ipr_resource_entry *res;
5373
5374         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5375         res = cmd->device->hostdata;
5376
5377         if (!res)
5378                 return FAILED;
5379
5380         spin_lock_irq(cmd->device->host->host_lock);
5381         rc = __ipr_eh_dev_reset(cmd);
5382         spin_unlock_irq(cmd->device->host->host_lock);
5383
5384         if (rc == SUCCESS) {
5385                 if (ipr_is_gata(res) && res->sata_port)
5386                         rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5387                 else
5388                         rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5389         }
5390
5391         return rc;
5392 }
5393
5394 /**
5395  * ipr_bus_reset_done - Op done function for bus reset.
5396  * @ipr_cmd:    ipr command struct
5397  *
5398  * This function is the op done function for a bus reset
5399  *
5400  * Return value:
5401  *      none
5402  **/
5403 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5404 {
5405         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5406         struct ipr_resource_entry *res;
5407
5408         ENTER;
5409         if (!ioa_cfg->sis64)
5410                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5411                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5412                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5413                                 break;
5414                         }
5415                 }
5416
5417         /*
5418          * If abort has not completed, indicate the reset has, else call the
5419          * abort's done function to wake the sleeping eh thread
5420          */
5421         if (ipr_cmd->sibling->sibling)
5422                 ipr_cmd->sibling->sibling = NULL;
5423         else
5424                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5425
5426         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5427         LEAVE;
5428 }
5429
5430 /**
5431  * ipr_abort_timeout - An abort task has timed out
5432  * @ipr_cmd:    ipr command struct
5433  *
5434  * This function handles the case where an abort task times out. If this
5435  * happens, we issue a bus reset since we have resources tied
5436  * up that must be freed before returning to the midlayer.
5437  *
5438  * Return value:
5439  *      none
5440  **/
5441 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5442 {
5443         struct ipr_cmnd *reset_cmd;
5444         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5445         struct ipr_cmd_pkt *cmd_pkt;
5446         unsigned long lock_flags = 0;
5447
5448         ENTER;
5449         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5450         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5451                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5452                 return;
5453         }
5454
5455         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5456         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5457         ipr_cmd->sibling = reset_cmd;
5458         reset_cmd->sibling = ipr_cmd;
5459         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5460         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5461         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5462         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5463         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5464
5465         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5466         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5467         LEAVE;
5468 }
5469
5470 /**
5471  * ipr_cancel_op - Cancel specified op
5472  * @scsi_cmd:   scsi command struct
5473  *
5474  * This function cancels the specified op.
5475  *
5476  * Return value:
5477  *      SUCCESS / FAILED
5478  **/
5479 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5480 {
5481         struct ipr_cmnd *ipr_cmd;
5482         struct ipr_ioa_cfg *ioa_cfg;
5483         struct ipr_resource_entry *res;
5484         struct ipr_cmd_pkt *cmd_pkt;
5485         u32 ioasc, int_reg;
5486         int i, op_found = 0;
5487         struct ipr_hrr_queue *hrrq;
5488
5489         ENTER;
5490         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5491         res = scsi_cmd->device->hostdata;
5492
5493         /* If we are currently going through reset/reload, return failed.
5494          * This will force the mid-layer to call ipr_eh_host_reset,
5495          * which will then go to sleep and wait for the reset to complete
5496          */
5497         if (ioa_cfg->in_reset_reload ||
5498             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5499                 return FAILED;
5500         if (!res)
5501                 return FAILED;
5502
5503         /*
5504          * If we are aborting a timed out op, chances are that the timeout was caused
5505          * by a still not detected EEH error. In such cases, reading a register will
5506          * trigger the EEH recovery infrastructure.
5507          */
5508         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5509
5510         if (!ipr_is_gscsi(res))
5511                 return FAILED;
5512
5513         for_each_hrrq(hrrq, ioa_cfg) {
5514                 spin_lock(&hrrq->_lock);
5515                 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5516                         if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5517                                 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5518                                         op_found = 1;
5519                                         break;
5520                                 }
5521                         }
5522                 }
5523                 spin_unlock(&hrrq->_lock);
5524         }
5525
5526         if (!op_found)
5527                 return SUCCESS;
5528
5529         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5530         ipr_cmd->ioarcb.res_handle = res->res_handle;
5531         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5532         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5533         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5534         ipr_cmd->u.sdev = scsi_cmd->device;
5535
5536         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5537                     scsi_cmd->cmnd[0]);
5538         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5539         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5540
5541         /*
5542          * If the abort task timed out and we sent a bus reset, we will get
5543          * one of the following responses to the abort.
5544          */
5545         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5546                 ioasc = 0;
5547                 ipr_trace;
5548         }
5549
5550         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5551         if (!ipr_is_naca_model(res))
5552                 res->needs_sync_complete = 1;
5553
5554         LEAVE;
5555         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5556 }
5557
5558 /**
5559  * ipr_scan_finished - Report whether the device scan is complete
5560  * @shost:              scsi host struct
5561  * @elapsed_time:       elapsed scan time in jiffies
5562  * Return value:
5563  *      0 if scan in progress / 1 if scan is complete
5564  **/
5565 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5566 {
5567         unsigned long lock_flags;
5568         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5569         int rc = 0;
5570
5571         spin_lock_irqsave(shost->host_lock, lock_flags);
5572         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5573                 rc = 1;
5574         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5575                 rc = 1;
5576         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5577         return rc;
5578 }
5579
5580 /**
5581  * ipr_eh_abort - Abort a single op
5582  * @scsi_cmd:   scsi command struct
5583  *
5584  * Return value:
5585  *      SUCCESS / FAILED
5586  **/
5587 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5588 {
5589         unsigned long flags;
5590         int rc;
5591         struct ipr_ioa_cfg *ioa_cfg;
5592
5593         ENTER;
5594
5595         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5596
5597         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5598         rc = ipr_cancel_op(scsi_cmd);
5599         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5600
5601         if (rc == SUCCESS)
5602                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5603         LEAVE;
5604         return rc;
5605 }
5606
5607 /**
5608  * ipr_handle_other_interrupt - Handle "other" interrupts
5609  * @ioa_cfg:    ioa config struct
5610  * @int_reg:    interrupt register
5611  *
5612  * Return value:
5613  *      IRQ_NONE / IRQ_HANDLED
5614  **/
5615 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5616                                               u32 int_reg)
5617 {
5618         irqreturn_t rc = IRQ_HANDLED;
5619         u32 int_mask_reg;
5620
5621         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5622         int_reg &= ~int_mask_reg;
5623
5624         /* If no interrupt occurred on the adapter, ignore it.
5625          * On SIS 64 adapters, first check for a stage change interrupt.
5626          */
5627         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5628                 if (ioa_cfg->sis64) {
5629                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5630                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5631                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5632
5633                                 /* clear stage change */
5634                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5635                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5636                                 list_del(&ioa_cfg->reset_cmd->queue);
5637                                 del_timer(&ioa_cfg->reset_cmd->timer);
5638                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5639                                 return IRQ_HANDLED;
5640                         }
5641                 }
5642
5643                 return IRQ_NONE;
5644         }
5645
5646         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5647                 /* Mask the interrupt */
5648                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5649                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5650
5651                 list_del(&ioa_cfg->reset_cmd->queue);
5652                 del_timer(&ioa_cfg->reset_cmd->timer);
5653                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5654         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5655                 if (ioa_cfg->clear_isr) {
5656                         if (ipr_debug && printk_ratelimit())
5657                                 dev_err(&ioa_cfg->pdev->dev,
5658                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5659                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5660                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5661                         return IRQ_NONE;
5662                 }
5663         } else {
5664                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5665                         ioa_cfg->ioa_unit_checked = 1;
5666                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5667                         dev_err(&ioa_cfg->pdev->dev,
5668                                 "No Host RRQ. 0x%08X\n", int_reg);
5669                 else
5670                         dev_err(&ioa_cfg->pdev->dev,
5671                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5672
5673                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5674                         ioa_cfg->sdt_state = GET_DUMP;
5675
5676                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5677                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5678         }
5679
5680         return rc;
5681 }
5682
5683 /**
5684  * ipr_isr_eh - Interrupt service routine error handler
5685  * @ioa_cfg:    ioa config struct
5686  * @msg:        message to log
5687  * @number:     number to log with the message
5688  * Return value:
5689  *      none
5690  **/
5691 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5692 {
5693         ioa_cfg->errors_logged++;
5694         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5695
5696         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5697                 ioa_cfg->sdt_state = GET_DUMP;
5698
5699         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5700 }
5701
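/**
 * ipr_process_hrrq - Pull completed responses off a host RRQ
 * @hrr_queue:  host request response queue to service
 * @budget:     max number of responses to process, or -1 for no limit
 * @doneq:      list head on which completed commands are collected
 *
 * Return value:
 *      number of responses processed
 **/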
5702 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5703                                                 struct list_head *doneq)
5704 {
5705         u32 ioasc;
5706         u16 cmd_index;
5707         struct ipr_cmnd *ipr_cmd;
5708         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5709         int num_hrrq = 0;
5710
5711         /* If interrupts are disabled, ignore the interrupt */
5712         if (!hrr_queue->allow_interrupts)
5713                 return 0;
5714
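        /* Each response's toggle bit tells us whether it is new: the IOA
         * flips the bit every time it wraps the queue, and we mirror that
         * below in hrr_queue->toggle_bit when our pointer wraps.
         */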
5715         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5716                hrr_queue->toggle_bit) {
5717
5718                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5719                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5720                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5721
5722                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5723                              cmd_index < hrr_queue->min_cmd_id)) {
5724                         ipr_isr_eh(ioa_cfg,
5725                                 "Invalid response handle from IOA: ",
5726                                 cmd_index);
5727                         break;
5728                 }
5729
5730                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5731                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5732
5733                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5734
5735                 list_move_tail(&ipr_cmd->queue, doneq);
5736
5737                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5738                         hrr_queue->hrrq_curr++;
5739                 } else {
5740                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5741                         hrr_queue->toggle_bit ^= 1u;
5742                 }
5743                 num_hrrq++;
5744                 if (budget > 0 && num_hrrq >= budget)
5745                         break;
5746         }
5747
5748         return num_hrrq;
5749 }
5750
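/**
 * ipr_iopoll - irq_poll callback used to drain responses outside hard IRQ context
 * @iop:        irq_poll structure embedded in the hrr queue
 * @budget:     max number of responses to process in this poll
 *
 * Return value:
 *      number of completed ops
 **/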
5751 static int ipr_iopoll(struct irq_poll *iop, int budget)
5752 {
5753         struct ipr_ioa_cfg *ioa_cfg;
5754         struct ipr_hrr_queue *hrrq;
5755         struct ipr_cmnd *ipr_cmd, *temp;
5756         unsigned long hrrq_flags;
5757         int completed_ops;
5758         LIST_HEAD(doneq);
5759
5760         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5761         ioa_cfg = hrrq->ioa_cfg;
5762
5763         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5764         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5765
5766         if (completed_ops < budget)
5767                 irq_poll_complete(iop);
5768         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5769
5770         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5771                 list_del(&ipr_cmd->queue);
5772                 del_timer(&ipr_cmd->timer);
5773                 ipr_cmd->fast_done(ipr_cmd);
5774         }
5775
5776         return completed_ops;
5777 }
5778
5779 /**
5780  * ipr_isr - Interrupt service routine
5781  * @irq:        irq number
5782  * @devp:       pointer to the hrr queue
5783  *
5784  * Return value:
5785  *      IRQ_NONE / IRQ_HANDLED
5786  **/
5787 static irqreturn_t ipr_isr(int irq, void *devp)
5788 {
5789         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5790         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5791         unsigned long hrrq_flags = 0;
5792         u32 int_reg = 0;
5793         int num_hrrq = 0;
5794         int irq_none = 0;
5795         struct ipr_cmnd *ipr_cmd, *temp;
5796         irqreturn_t rc = IRQ_NONE;
5797         LIST_HEAD(doneq);
5798
5799         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5800         /* If interrupts are disabled, ignore the interrupt */
5801         if (!hrrq->allow_interrupts) {
5802                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5803                 return IRQ_NONE;
5804         }
5805
5806         while (1) {
5807                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5808                         rc = IRQ_HANDLED;
5809
5810                         if (!ioa_cfg->clear_isr)
5811                                 break;
5812
5813                         /* Clear the PCI interrupt */
5814                         num_hrrq = 0;
5815                         do {
5816                                 writel(IPR_PCII_HRRQ_UPDATED,
5817                                      ioa_cfg->regs.clr_interrupt_reg32);
5818                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5819                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5820                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5821
5822                 } else if (rc == IRQ_NONE && irq_none == 0) {
5823                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5824                         irq_none++;
5825                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5826                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5827                         ipr_isr_eh(ioa_cfg,
5828                                 "Error clearing HRRQ: ", num_hrrq);
5829                         rc = IRQ_HANDLED;
5830                         break;
5831                 } else
5832                         break;
5833         }
5834
5835         if (unlikely(rc == IRQ_NONE))
5836                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5837
5838         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5839         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5840                 list_del(&ipr_cmd->queue);
5841                 del_timer(&ipr_cmd->timer);
5842                 ipr_cmd->fast_done(ipr_cmd);
5843         }
5844         return rc;
5845 }
5846
5847 /**
5848  * ipr_isr_mhrrq - Interrupt service routine
5849  * @irq:        irq number
5850  * @devp:       pointer to the hrr queue
5851  *
5852  * Return value:
5853  *      IRQ_NONE / IRQ_HANDLED
5854  **/
5855 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5856 {
5857         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5858         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5859         unsigned long hrrq_flags = 0;
5860         struct ipr_cmnd *ipr_cmd, *temp;
5861         irqreturn_t rc = IRQ_NONE;
5862         LIST_HEAD(doneq);
5863
5864         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5865
5866         /* If interrupts are disabled, ignore the interrupt */
5867         if (!hrrq->allow_interrupts) {
5868                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5869                 return IRQ_NONE;
5870         }
5871
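        /* With irq_poll enabled, just confirm that new work is pending and
         * schedule the poll handler; ipr_iopoll will drain the queue outside
         * hard interrupt context.
         */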
5872         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5873                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5874                        hrrq->toggle_bit) {
5875                         irq_poll_sched(&hrrq->iopoll);
5876                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5877                         return IRQ_HANDLED;
5878                 }
5879         } else {
5880                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5881                         hrrq->toggle_bit)
5882
5883                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5884                                 rc = IRQ_HANDLED;
5885         }
5886
5887         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5888
5889         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5890                 list_del(&ipr_cmd->queue);
5891                 del_timer(&ipr_cmd->timer);
5892                 ipr_cmd->fast_done(ipr_cmd);
5893         }
5894         return rc;
5895 }
5896
5897 /**
5898  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5899  * @ioa_cfg:    ioa config struct
5900  * @ipr_cmd:    ipr command struct
5901  *
5902  * Return value:
5903  *      0 on success / -1 on failure
5904  **/
5905 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5906                              struct ipr_cmnd *ipr_cmd)
5907 {
5908         int i, nseg;
5909         struct scatterlist *sg;
5910         u32 length;
5911         u32 ioadl_flags = 0;
5912         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5913         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5914         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5915
5916         length = scsi_bufflen(scsi_cmd);
5917         if (!length)
5918                 return 0;
5919
5920         nseg = scsi_dma_map(scsi_cmd);
5921         if (nseg < 0) {
5922                 if (printk_ratelimit())
5923                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5924                 return -1;
5925         }
5926
5927         ipr_cmd->dma_use_sg = nseg;
5928
5929         ioarcb->data_transfer_length = cpu_to_be32(length);
5930         ioarcb->ioadl_len =
5931                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5932
5933         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5934                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5935                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5936         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5937                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5938
5939         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5940                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5941                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5942                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5943         }
5944
5945         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5946         return 0;
5947 }
5948
5949 /**
5950  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5951  * @ioa_cfg:    ioa config struct
5952  * @ipr_cmd:    ipr command struct
5953  *
5954  * Return value:
5955  *      0 on success / -1 on failure
5956  **/
5957 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5958                            struct ipr_cmnd *ipr_cmd)
5959 {
5960         int i, nseg;
5961         struct scatterlist *sg;
5962         u32 length;
5963         u32 ioadl_flags = 0;
5964         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5965         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5966         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5967
5968         length = scsi_bufflen(scsi_cmd);
5969         if (!length)
5970                 return 0;
5971
5972         nseg = scsi_dma_map(scsi_cmd);
5973         if (nseg < 0) {
5974                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5975                 return -1;
5976         }
5977
5978         ipr_cmd->dma_use_sg = nseg;
5979
5980         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5981                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5982                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5983                 ioarcb->data_transfer_length = cpu_to_be32(length);
5984                 ioarcb->ioadl_len =
5985                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5986         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5987                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5988                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5989                 ioarcb->read_ioadl_len =
5990                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5991         }
5992
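        /* If the S/G list is small enough to fit in the add_data area of
         * the IOARCB itself, use that inline copy so the adapter picks the
         * descriptors up along with the IOARCB instead of fetching a
         * separate list.
         */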
5993         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5994                 ioadl = ioarcb->u.add_data.u.ioadl;
5995                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5996                                     offsetof(struct ipr_ioarcb, u.add_data));
5997                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5998         }
5999
6000         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6001                 ioadl[i].flags_and_data_len =
6002                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6003                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6004         }
6005
6006         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6007         return 0;
6008 }
6009
6010 /**
6011  * __ipr_erp_done - Process completion of ERP for a device
6012  * @ipr_cmd:            ipr command struct
6013  *
6014  * This function copies the sense buffer into the scsi_cmd
6015  * struct and calls the scsi_done function.
6016  *
6017  * Return value:
6018  *      nothing
6019  **/
6020 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6021 {
6022         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6023         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6024         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6025
6026         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6027                 scsi_cmd->result |= (DID_ERROR << 16);
6028                 scmd_printk(KERN_ERR, scsi_cmd,
6029                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
6030         } else {
6031                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6032                        SCSI_SENSE_BUFFERSIZE);
6033         }
6034
6035         if (res) {
6036                 if (!ipr_is_naca_model(res))
6037                         res->needs_sync_complete = 1;
6038                 res->in_erp = 0;
6039         }
6040         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6041         scsi_cmd->scsi_done(scsi_cmd);
6042         if (ipr_cmd->eh_comp)
6043                 complete(ipr_cmd->eh_comp);
6044         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6045 }
6046
6047 /**
6048  * ipr_erp_done - Process completion of ERP for a device
6049  * @ipr_cmd:            ipr command struct
6050  *
6051  * This function copies the sense buffer into the scsi_cmd
6052  * struct and calls the scsi_done function.
6053  *
6054  * Return value:
6055  *      nothing
6056  **/
6057 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6058 {
6059         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6060         unsigned long hrrq_flags;
6061
6062         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6063         __ipr_erp_done(ipr_cmd);
6064         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6065 }
6066
6067 /**
6068  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6069  * @ipr_cmd:    ipr command struct
6070  *
6071  * Return value:
6072  *      none
6073  **/
6074 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6075 {
6076         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6077         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6078         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6079
6080         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
6081         ioarcb->data_transfer_length = 0;
6082         ioarcb->read_data_transfer_length = 0;
6083         ioarcb->ioadl_len = 0;
6084         ioarcb->read_ioadl_len = 0;
6085         ioasa->hdr.ioasc = 0;
6086         ioasa->hdr.residual_data_len = 0;
6087
6088         if (ipr_cmd->ioa_cfg->sis64)
6089                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6090                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6091         else {
6092                 ioarcb->write_ioadl_addr =
6093                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6094                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6095         }
6096 }
6097
6098 /**
6099  * __ipr_erp_request_sense - Send request sense to a device
6100  * @ipr_cmd:    ipr command struct
6101  *
6102  * This function sends a request sense to a device as a result
6103  * of a check condition.
6104  *
6105  * Return value:
6106  *      nothing
6107  **/
6108 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6109 {
6110         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6111         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6112
6113         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6114                 __ipr_erp_done(ipr_cmd);
6115                 return;
6116         }
6117
6118         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6119
6120         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6121         cmd_pkt->cdb[0] = REQUEST_SENSE;
6122         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6123         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6124         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6125         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6126
6127         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6128                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
6129
6130         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6131                    IPR_REQUEST_SENSE_TIMEOUT * 2);
6132 }
6133
6134 /**
6135  * ipr_erp_request_sense - Send request sense to a device
6136  * @ipr_cmd:    ipr command struct
6137  *
6138  * This function sends a request sense to a device as a result
6139  * of a check condition.
6140  *
6141  * Return value:
6142  *      nothing
6143  **/
6144 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6145 {
6146         struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6147         unsigned long hrrq_flags;
6148
6149         spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6150         __ipr_erp_request_sense(ipr_cmd);
6151         spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6152 }
6153
6154 /**
6155  * ipr_erp_cancel_all - Send cancel all to a device
6156  * @ipr_cmd:    ipr command struct
6157  *
6158  * This function sends a cancel all to a device to clear the
6159  * queue. If we are running TCQ on the device, QERR is set to 1,
6160  * which means all outstanding ops have been dropped on the floor.
6161  * Cancel all will return them to us.
6162  *
6163  * Return value:
6164  *      nothing
6165  **/
6166 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6167 {
6168         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6169         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6170         struct ipr_cmd_pkt *cmd_pkt;
6171
6172         res->in_erp = 1;
6173
6174         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6175
6176         if (!scsi_cmd->device->simple_tags) {
6177                 __ipr_erp_request_sense(ipr_cmd);
6178                 return;
6179         }
6180
6181         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6182         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6183         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6184
6185         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6186                    IPR_CANCEL_ALL_TIMEOUT);
6187 }
6188
6189 /**
6190  * ipr_dump_ioasa - Dump contents of IOASA
6191  * @ioa_cfg:    ioa config struct
6192  * @ipr_cmd:    ipr command struct
6193  * @res:                resource entry struct
6194  *
6195  * This function is invoked by the interrupt handler when ops
6196  * fail. It will log the IOASA if appropriate. Only called
6197  * for GPDD ops.
6198  *
6199  * Return value:
6200  *      none
6201  **/
6202 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
6203                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
6204 {
6205         int i;
6206         u16 data_len;
6207         u32 ioasc, fd_ioasc;
6208         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6209         __be32 *ioasa_data = (__be32 *)ioasa;
6210         int error_index;
6211
6212         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6213         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6214
6215         if (0 == ioasc)
6216                 return;
6217
6218         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6219                 return;
6220
6221         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6222                 error_index = ipr_get_error(fd_ioasc);
6223         else
6224                 error_index = ipr_get_error(ioasc);
6225
6226         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6227                 /* Don't log an error if the IOA already logged one */
6228                 if (ioasa->hdr.ilid != 0)
6229                         return;
6230
6231                 if (!ipr_is_gscsi(res))
6232                         return;
6233
6234                 if (ipr_error_table[error_index].log_ioasa == 0)
6235                         return;
6236         }
6237
6238         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6239
6240         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6241         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6242                 data_len = sizeof(struct ipr_ioasa64);
6243         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6244                 data_len = sizeof(struct ipr_ioasa);
6245
6246         ipr_err("IOASA Dump:\n");
6247
6248         for (i = 0; i < data_len / 4; i += 4) {
6249                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6250                         be32_to_cpu(ioasa_data[i]),
6251                         be32_to_cpu(ioasa_data[i+1]),
6252                         be32_to_cpu(ioasa_data[i+2]),
6253                         be32_to_cpu(ioasa_data[i+3]));
6254         }
6255 }
6256
6257 /**
6258  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6259  * @ipr_cmd:    ipr command struct
6261  *
6262  * Return value:
6263  *      none
6264  **/
6265 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6266 {
6267         u32 failing_lba;
6268         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6269         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6270         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6271         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6272
6273         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6274
6275         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6276                 return;
6277
6278         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6279
6280         if (ipr_is_vset_device(res) &&
6281             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6282             ioasa->u.vset.failing_lba_hi != 0) {
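                /* The failing LBA will not fit in 32 bits, so build
                 * descriptor format sense data (response code 0x72) with
                 * an information descriptor holding the full 64-bit LBA.
                 */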
6283                 sense_buf[0] = 0x72;
6284                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6285                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6286                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6287
6288                 sense_buf[7] = 12;
6289                 sense_buf[8] = 0;
6290                 sense_buf[9] = 0x0A;
6291                 sense_buf[10] = 0x80;
6292
6293                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6294
6295                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6296                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6297                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6298                 sense_buf[15] = failing_lba & 0x000000ff;
6299
6300                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6301
6302                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6303                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6304                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6305                 sense_buf[19] = failing_lba & 0x000000ff;
6306         } else {
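                /* Build fixed format sense data (response code 0x70) */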
6307                 sense_buf[0] = 0x70;
6308                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6309                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6310                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6311
6312                 /* Illegal request */
6313                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6314                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6315                         sense_buf[7] = 10;      /* additional length */
6316
6317                         /* IOARCB was in error */
6318                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6319                                 sense_buf[15] = 0xC0;
6320                         else    /* Parameter data was invalid */
6321                                 sense_buf[15] = 0x80;
6322
6323                         sense_buf[16] =
6324                             ((IPR_FIELD_POINTER_MASK &
6325                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6326                         sense_buf[17] =
6327                             (IPR_FIELD_POINTER_MASK &
6328                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6329                 } else {
6330                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6331                                 if (ipr_is_vset_device(res))
6332                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6333                                 else
6334                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6335
6336                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6337                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6338                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6339                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6340                                 sense_buf[6] = failing_lba & 0x000000ff;
6341                         }
6342
6343                         sense_buf[7] = 6;       /* additional length */
6344                 }
6345         }
6346 }
6347
6348 /**
6349  * ipr_get_autosense - Copy autosense data to sense buffer
6350  * @ipr_cmd:    ipr command struct
6351  *
6352  * This function copies the autosense buffer to the buffer
6353  * in the scsi_cmd, if there is autosense available.
6354  *
6355  * Return value:
6356  *      1 if autosense was available / 0 if not
6357  **/
6358 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6359 {
6360         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6361         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6362
6363         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6364                 return 0;
6365
6366         if (ipr_cmd->ioa_cfg->sis64)
6367                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6368                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6369                            SCSI_SENSE_BUFFERSIZE));
6370         else
6371                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6372                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6373                            SCSI_SENSE_BUFFERSIZE));
6374         return 1;
6375 }
6376
6377 /**
6378  * ipr_erp_start - Process an error response for a SCSI op
6379  * @ioa_cfg:    ioa config struct
6380  * @ipr_cmd:    ipr command struct
6381  *
6382  * This function determines whether or not to initiate ERP
6383  * on the affected device.
6384  *
6385  * Return value:
6386  *      nothing
6387  **/
6388 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6389                               struct ipr_cmnd *ipr_cmd)
6390 {
6391         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6392         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6393         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6394         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6395
6396         if (!res) {
6397                 __ipr_scsi_eh_done(ipr_cmd);
6398                 return;
6399         }
6400
6401         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6402                 ipr_gen_sense(ipr_cmd);
6403
6404         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6405
6406         switch (masked_ioasc) {
6407         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6408                 if (ipr_is_naca_model(res))
6409                         scsi_cmd->result |= (DID_ABORT << 16);
6410                 else
6411                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6412                 break;
6413         case IPR_IOASC_IR_RESOURCE_HANDLE:
6414         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6415                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6416                 break;
6417         case IPR_IOASC_HW_SEL_TIMEOUT:
6418                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6419                 if (!ipr_is_naca_model(res))
6420                         res->needs_sync_complete = 1;
6421                 break;
6422         case IPR_IOASC_SYNC_REQUIRED:
6423                 if (!res->in_erp)
6424                         res->needs_sync_complete = 1;
6425                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6426                 break;
6427         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6428         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6429                 /*
6430                  * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6431                  * so SCSI mid-layer and upper layers handle it accordingly.
6432                  */
6433                 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6434                         scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6435                 break;
6436         case IPR_IOASC_BUS_WAS_RESET:
6437         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6438                 /*
6439                  * Report the bus reset and ask for a retry. The device
6440                  * will report CHECK CONDITION/UNIT ATTENTION on the next command.
6441                  */
6442                 if (!res->resetting_device)
6443                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6444                 scsi_cmd->result |= (DID_ERROR << 16);
6445                 if (!ipr_is_naca_model(res))
6446                         res->needs_sync_complete = 1;
6447                 break;
6448         case IPR_IOASC_HW_DEV_BUS_STATUS:
6449                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6450                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6451                         if (!ipr_get_autosense(ipr_cmd)) {
6452                                 if (!ipr_is_naca_model(res)) {
6453                                         ipr_erp_cancel_all(ipr_cmd);
6454                                         return;
6455                                 }
6456                         }
6457                 }
6458                 if (!ipr_is_naca_model(res))
6459                         res->needs_sync_complete = 1;
6460                 break;
6461         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6462                 break;
6463         case IPR_IOASC_IR_NON_OPTIMIZED:
6464                 if (res->raw_mode) {
6465                         res->raw_mode = 0;
6466                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6467                 } else
6468                         scsi_cmd->result |= (DID_ERROR << 16);
6469                 break;
6470         default:
6471                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6472                         scsi_cmd->result |= (DID_ERROR << 16);
6473                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6474                         res->needs_sync_complete = 1;
6475                 break;
6476         }
6477
6478         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6479         scsi_cmd->scsi_done(scsi_cmd);
6480         if (ipr_cmd->eh_comp)
6481                 complete(ipr_cmd->eh_comp);
6482         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6483 }
6484
6485 /**
6486  * ipr_scsi_done - mid-layer done function
6487  * @ipr_cmd:    ipr command struct
6488  *
6489  * This function is invoked by the interrupt handler for
6490  * ops generated by the SCSI mid-layer
6491  *
6492  * Return value:
6493  *      none
6494  **/
6495 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6496 {
6497         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6498         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6499         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6500         unsigned long lock_flags;
6501
6502         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6503
6504         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6505                 scsi_dma_unmap(scsi_cmd);
6506
6507                 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6508                 scsi_cmd->scsi_done(scsi_cmd);
6509                 if (ipr_cmd->eh_comp)
6510                         complete(ipr_cmd->eh_comp);
6511                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6512                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6513         } else {
6514                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6515                 spin_lock(&ipr_cmd->hrrq->_lock);
6516                 ipr_erp_start(ioa_cfg, ipr_cmd);
6517                 spin_unlock(&ipr_cmd->hrrq->_lock);
6518                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6519         }
6520 }
6521
6522 /**
6523  * ipr_queuecommand - Queue a mid-layer request
6524  * @shost:              scsi host struct
6525  * @scsi_cmd:   scsi command struct
6526  *
6527  * This function queues a request generated by the mid-layer.
6528  *
6529  * Return value:
6530  *      0 on success
6531  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6532  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6533  **/
6534 static int ipr_queuecommand(struct Scsi_Host *shost,
6535                             struct scsi_cmnd *scsi_cmd)
6536 {
6537         struct ipr_ioa_cfg *ioa_cfg;
6538         struct ipr_resource_entry *res;
6539         struct ipr_ioarcb *ioarcb;
6540         struct ipr_cmnd *ipr_cmd;
6541         unsigned long hrrq_flags, lock_flags;
6542         int rc;
6543         struct ipr_hrr_queue *hrrq;
6544         int hrrq_id;
6545
6546         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6547
6548         scsi_cmd->result = (DID_OK << 16);
6549         res = scsi_cmd->device->hostdata;
6550
6551         if (ipr_is_gata(res) && res->sata_port) {
6552                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6553                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6554                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6555                 return rc;
6556         }
6557
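        /* Pick one of the HRRQs for this command so traffic is spread
         * across the available interrupt vectors.
         */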
6558         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6559         hrrq = &ioa_cfg->hrrq[hrrq_id];
6560
6561         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6562         /*
6563          * We are currently blocking all devices due to a host reset.
6564          * We have told the mid-layer to stop giving us new requests, but
6565          * ERP ops don't count. FIXME
6566          */
6567         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6568                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6569                 return SCSI_MLQUEUE_HOST_BUSY;
6570         }
6571
6572         /*
6573          * FIXME - Create scsi_set_host_offline interface
6574          *  and the ioa_is_dead check can be removed
6575          */
6576         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6577                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6578                 goto err_nodev;
6579         }
6580
6581         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6582         if (ipr_cmd == NULL) {
6583                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6584                 return SCSI_MLQUEUE_HOST_BUSY;
6585         }
6586         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6587
6588         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6589         ioarcb = &ipr_cmd->ioarcb;
6590
6591         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6592         ipr_cmd->scsi_cmd = scsi_cmd;
6593         ipr_cmd->done = ipr_scsi_eh_done;
6594
6595         if (ipr_is_gscsi(res)) {
6596                 if (scsi_cmd->underflow == 0)
6597                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6598
6599                 if (res->reset_occurred) {
6600                         res->reset_occurred = 0;
6601                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6602                 }
6603         }
6604
6605         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6606                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6607
6608                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6609                 if (scsi_cmd->flags & SCMD_TAGGED)
6610                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6611                 else
6612                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6613         }
6614
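        /* CDB opcodes 0xC0 and above are vendor specific. Route them to
         * the IOA as adapter commands, except on generic SCSI devices,
         * where only IPR_QUERY_RSRC_STATE is treated this way.
         */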
6615         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6616             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6617                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6618         }
6619         if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6620                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6621
6622                 if (scsi_cmd->underflow == 0)
6623                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6624         }
6625
6626         if (ioa_cfg->sis64)
6627                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6628         else
6629                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6630
6631         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6632         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6633                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6634                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6635                 if (!rc)
6636                         scsi_dma_unmap(scsi_cmd);
6637                 return SCSI_MLQUEUE_HOST_BUSY;
6638         }
6639
6640         if (unlikely(hrrq->ioa_is_dead)) {
6641                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6642                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6643                 scsi_dma_unmap(scsi_cmd);
6644                 goto err_nodev;
6645         }
6646
6647         ioarcb->res_handle = res->res_handle;
6648         if (res->needs_sync_complete) {
6649                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6650                 res->needs_sync_complete = 0;
6651         }
6652         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6653         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6654         ipr_send_command(ipr_cmd);
6655         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6656         return 0;
6657
6658 err_nodev:
6659         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6660         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6661         scsi_cmd->result = (DID_NO_CONNECT << 16);
6662         scsi_cmd->scsi_done(scsi_cmd);
6663         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6664         return 0;
6665 }
6666
6667 /**
6668  * ipr_ioctl - IOCTL handler
6669  * @sdev:       scsi device struct
6670  * @cmd:        IOCTL cmd
6671  * @arg:        IOCTL arg
6672  *
6673  * Return value:
6674  *      0 on success / other on failure
6675  **/
6676 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6677 {
6678         struct ipr_resource_entry *res;
6679
6680         res = (struct ipr_resource_entry *)sdev->hostdata;
6681         if (res && ipr_is_gata(res)) {
6682                 if (cmd == HDIO_GET_IDENTITY)
6683                         return -ENOTTY;
6684                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6685         }
6686
6687         return -EINVAL;
6688 }
6689
6690 /**
6691  * ipr_ioa_info - Get information about the card/driver
6692  * @host:       scsi host struct
6693  *
6694  * Return value:
6695  *      pointer to buffer with description string
6696  **/
6697 static const char *ipr_ioa_info(struct Scsi_Host *host)
6698 {
6699         static char buffer[512];
6700         struct ipr_ioa_cfg *ioa_cfg;
6701         unsigned long lock_flags = 0;
6702
6703         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6704
6705         spin_lock_irqsave(host->host_lock, lock_flags);
6706         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6707         spin_unlock_irqrestore(host->host_lock, lock_flags);
6708
6709         return buffer;
6710 }
6711
6712 static struct scsi_host_template driver_template = {
6713         .module = THIS_MODULE,
6714         .name = "IPR",
6715         .info = ipr_ioa_info,
6716         .ioctl = ipr_ioctl,
6717         .queuecommand = ipr_queuecommand,
6718         .eh_abort_handler = ipr_eh_abort,
6719         .eh_device_reset_handler = ipr_eh_dev_reset,
6720         .eh_host_reset_handler = ipr_eh_host_reset,
6721         .slave_alloc = ipr_slave_alloc,
6722         .slave_configure = ipr_slave_configure,
6723         .slave_destroy = ipr_slave_destroy,
6724         .scan_finished = ipr_scan_finished,
6725         .target_alloc = ipr_target_alloc,
6726         .target_destroy = ipr_target_destroy,
6727         .change_queue_depth = ipr_change_queue_depth,
6728         .bios_param = ipr_biosparam,
6729         .can_queue = IPR_MAX_COMMANDS,
6730         .this_id = -1,
6731         .sg_tablesize = IPR_MAX_SGLIST,
6732         .max_sectors = IPR_IOA_MAX_SECTORS,
6733         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6734         .use_clustering = ENABLE_CLUSTERING,
6735         .shost_attrs = ipr_ioa_attrs,
6736         .sdev_attrs = ipr_dev_attrs,
6737         .proc_name = IPR_NAME,
6738 };
6739
6740 /**
6741  * ipr_ata_phy_reset - libata phy_reset handler
6742  * @ap:         ata port to reset
6743  *
6744  **/
6745 static void ipr_ata_phy_reset(struct ata_port *ap)
6746 {
6747         unsigned long flags;
6748         struct ipr_sata_port *sata_port = ap->private_data;
6749         struct ipr_resource_entry *res = sata_port->res;
6750         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6751         int rc;
6752
6753         ENTER;
6754         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6755         while (ioa_cfg->in_reset_reload) {
6756                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6757                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6758                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6759         }
6760
6761         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6762                 goto out_unlock;
6763
6764         rc = ipr_device_reset(ioa_cfg, res);
6765
6766         if (rc) {
6767                 ap->link.device[0].class = ATA_DEV_NONE;
6768                 goto out_unlock;
6769         }
6770
6771         ap->link.device[0].class = res->ata_class;
6772         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6773                 ap->link.device[0].class = ATA_DEV_NONE;
6774
6775 out_unlock:
6776         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6777         LEAVE;
6778 }
6779
6780 /**
6781  * ipr_ata_post_internal - Cleanup after an internal command
6782  * @qc: ATA queued command
6783  *
6784  * Return value:
6785  *      none
6786  **/
6787 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6788 {
6789         struct ipr_sata_port *sata_port = qc->ap->private_data;
6790         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6791         struct ipr_cmnd *ipr_cmd;
6792         struct ipr_hrr_queue *hrrq;
6793         unsigned long flags;
6794
6795         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6796         while (ioa_cfg->in_reset_reload) {
6797                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6798                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6799                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6800         }
6801
6802         for_each_hrrq(hrrq, ioa_cfg) {
6803                 spin_lock(&hrrq->_lock);
6804                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6805                         if (ipr_cmd->qc == qc) {
6806                                 ipr_device_reset(ioa_cfg, sata_port->res);
6807                                 break;
6808                         }
6809                 }
6810                 spin_unlock(&hrrq->_lock);
6811         }
6812         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6813 }
6814
6815 /**
6816  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6817  * @regs:       destination
6818  * @tf: source ATA taskfile
6819  *
6820  * Return value:
6821  *      none
6822  **/
6823 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6824                              struct ata_taskfile *tf)
6825 {
6826         regs->feature = tf->feature;
6827         regs->nsect = tf->nsect;
6828         regs->lbal = tf->lbal;
6829         regs->lbam = tf->lbam;
6830         regs->lbah = tf->lbah;
6831         regs->device = tf->device;
6832         regs->command = tf->command;
6833         regs->hob_feature = tf->hob_feature;
6834         regs->hob_nsect = tf->hob_nsect;
6835         regs->hob_lbal = tf->hob_lbal;
6836         regs->hob_lbam = tf->hob_lbam;
6837         regs->hob_lbah = tf->hob_lbah;
6838         regs->ctl = tf->ctl;
6839 }
6840
6841 /**
6842  * ipr_sata_done - done function for SATA commands
6843  * @ipr_cmd:    ipr command struct
6844  *
6845  * This function is invoked by the interrupt handler for
6846  * ops generated by the SCSI mid-layer to SATA devices
6847  *
6848  * Return value:
6849  *      none
6850  **/
6851 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6852 {
6853         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6854         struct ata_queued_cmd *qc = ipr_cmd->qc;
6855         struct ipr_sata_port *sata_port = qc->ap->private_data;
6856         struct ipr_resource_entry *res = sata_port->res;
6857         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6858
6859         spin_lock(&ipr_cmd->hrrq->_lock);
6860         if (ipr_cmd->ioa_cfg->sis64)
6861                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6862                        sizeof(struct ipr_ioasa_gata));
6863         else
6864                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6865                        sizeof(struct ipr_ioasa_gata));
6866         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6867
6868         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6869                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6870
6871         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6872                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6873         else
6874                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6875         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6876         spin_unlock(&ipr_cmd->hrrq->_lock);
6877         ata_qc_complete(qc);
6878 }
6879
6880 /**
6881  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6882  * @ipr_cmd:    ipr command struct
6883  * @qc:         ATA queued command
6884  *
6885  **/
6886 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6887                                   struct ata_queued_cmd *qc)
6888 {
6889         u32 ioadl_flags = 0;
6890         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6891         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6892         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6893         int len = qc->nbytes;
6894         struct scatterlist *sg;
6895         unsigned int si;
6896         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6897
6898         if (len == 0)
6899                 return;
6900
6901         if (qc->dma_dir == DMA_TO_DEVICE) {
6902                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6903                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6904         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6905                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6906
6907         ioarcb->data_transfer_length = cpu_to_be32(len);
6908         ioarcb->ioadl_len =
6909                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6910         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6911                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6912
6913         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6914                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6915                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6916                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6917
6918                 last_ioadl64 = ioadl64;
6919                 ioadl64++;
6920         }
6921
6922         if (likely(last_ioadl64))
6923                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6924 }
6925
6926 /**
6927  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6928  * @ipr_cmd:    ipr command struct
6929  * @qc:         ATA queued command
6930  *
6931  **/
6932 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6933                                 struct ata_queued_cmd *qc)
6934 {
6935         u32 ioadl_flags = 0;
6936         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6937         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6938         struct ipr_ioadl_desc *last_ioadl = NULL;
6939         int len = qc->nbytes;
6940         struct scatterlist *sg;
6941         unsigned int si;
6942
6943         if (len == 0)
6944                 return;
6945
6946         if (qc->dma_dir == DMA_TO_DEVICE) {
6947                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6948                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6949                 ioarcb->data_transfer_length = cpu_to_be32(len);
6950                 ioarcb->ioadl_len =
6951                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6952         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6953                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6954                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6955                 ioarcb->read_ioadl_len =
6956                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6957         }
6958
6959         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6960                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6961                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6962
6963                 last_ioadl = ioadl;
6964                 ioadl++;
6965         }
6966
6967         if (likely(last_ioadl))
6968                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6969 }
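/*
 * Illustrative note: unlike the sis64 variant above it, the legacy IOADL
 * format keeps separate read and write length fields in the IOARCB, which
 * is why the two dma_dir branches fill different members. The descriptor
 * chain itself follows the same one-entry-per-sg-element pattern, with
 * IPR_IOADL_FLAGS_LAST set only on the tail.
 */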
6970
6971 /**
6972  * ipr_qc_defer - Get a free ipr_cmd
6973  * @qc: queued command
6974  *
6975  * Return value:
6976  *      0 if success / ATA_DEFER_LINK if the command must be deferred
6977  **/
6978 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6979 {
6980         struct ata_port *ap = qc->ap;
6981         struct ipr_sata_port *sata_port = ap->private_data;
6982         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6983         struct ipr_cmnd *ipr_cmd;
6984         struct ipr_hrr_queue *hrrq;
6985         int hrrq_id;
6986
6987         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6988         hrrq = &ioa_cfg->hrrq[hrrq_id];
6989
6990         qc->lldd_task = NULL;
6991         spin_lock(&hrrq->_lock);
6992         if (unlikely(hrrq->ioa_is_dead)) {
6993                 spin_unlock(&hrrq->_lock);
6994                 return 0;
6995         }
6996
6997         if (unlikely(!hrrq->allow_cmds)) {
6998                 spin_unlock(&hrrq->_lock);
6999                 return ATA_DEFER_LINK;
7000         }
7001
7002         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7003         if (ipr_cmd == NULL) {
7004                 spin_unlock(&hrrq->_lock);
7005                 return ATA_DEFER_LINK;
7006         }
7007
7008         qc->lldd_task = ipr_cmd;
7009         spin_unlock(&hrrq->_lock);
7010         return 0;
7011 }
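/*
 * Illustrative note: ->qc_defer() runs before ->qc_issue() and is used
 * here to reserve a command block up front. Returning ATA_DEFER_LINK asks
 * libata to requeue the qc and retry later, while returning 0 with
 * qc->lldd_task left NULL (the dead-IOA case above) lets ipr_qc_issue()
 * fail the command with AC_ERR_SYSTEM instead of retrying forever.
 */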
7012
7013 /**
7014  * ipr_qc_issue - Issue a SATA qc to a device
7015  * @qc: queued command
7016  *
7017  * Return value:
7018  *      0 if success / AC_ERR_* failure code otherwise
7019  **/
7020 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7021 {
7022         struct ata_port *ap = qc->ap;
7023         struct ipr_sata_port *sata_port = ap->private_data;
7024         struct ipr_resource_entry *res = sata_port->res;
7025         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7026         struct ipr_cmnd *ipr_cmd;
7027         struct ipr_ioarcb *ioarcb;
7028         struct ipr_ioarcb_ata_regs *regs;
7029
7030         if (qc->lldd_task == NULL)
7031                 ipr_qc_defer(qc);
7032
7033         ipr_cmd = qc->lldd_task;
7034         if (ipr_cmd == NULL)
7035                 return AC_ERR_SYSTEM;
7036
7037         qc->lldd_task = NULL;
7038         spin_lock(&ipr_cmd->hrrq->_lock);
7039         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7040                         ipr_cmd->hrrq->ioa_is_dead)) {
7041                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7042                 spin_unlock(&ipr_cmd->hrrq->_lock);
7043                 return AC_ERR_SYSTEM;
7044         }
7045
7046         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
7047         ioarcb = &ipr_cmd->ioarcb;
7048
7049         if (ioa_cfg->sis64) {
7050                 regs = &ipr_cmd->i.ata_ioadl.regs;
7051                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7052         } else
7053                 regs = &ioarcb->u.add_data.u.regs;
7054
7055         memset(regs, 0, sizeof(*regs));
7056         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
7057
7058         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7059         ipr_cmd->qc = qc;
7060         ipr_cmd->done = ipr_sata_done;
7061         ipr_cmd->ioarcb.res_handle = res->res_handle;
7062         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7063         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7064         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
7065         ipr_cmd->dma_use_sg = qc->n_elem;
7066
7067         if (ioa_cfg->sis64)
7068                 ipr_build_ata_ioadl64(ipr_cmd, qc);
7069         else
7070                 ipr_build_ata_ioadl(ipr_cmd, qc);
7071
7072         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7073         ipr_copy_sata_tf(regs, &qc->tf);
7074         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
7075         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
7076
7077         switch (qc->tf.protocol) {
7078         case ATA_PROT_NODATA:
7079         case ATA_PROT_PIO:
7080                 break;
7081
7082         case ATA_PROT_DMA:
7083                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7084                 break;
7085
7086         case ATAPI_PROT_PIO:
7087         case ATAPI_PROT_NODATA:
7088                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7089                 break;
7090
7091         case ATAPI_PROT_DMA:
7092                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7093                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7094                 break;
7095
7096         default:
7097                 WARN_ON(1);
7098                 spin_unlock(&ipr_cmd->hrrq->_lock);
7099                 return AC_ERR_INVALID;
7100         }
7101
7102         ipr_send_command(ipr_cmd);
7103         spin_unlock(&ipr_cmd->hrrq->_lock);
7104
7105         return 0;
7106 }
7107
7108 /**
7109  * ipr_qc_fill_rtf - Read result TF
7110  * @qc: ATA queued command
7111  *
7112  * Return value:
7113  *      true
7114  **/
7115 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7116 {
7117         struct ipr_sata_port *sata_port = qc->ap->private_data;
7118         struct ipr_ioasa_gata *g = &sata_port->ioasa;
7119         struct ata_taskfile *tf = &qc->result_tf;
7120
7121         tf->feature = g->error;
7122         tf->nsect = g->nsect;
7123         tf->lbal = g->lbal;
7124         tf->lbam = g->lbam;
7125         tf->lbah = g->lbah;
7126         tf->device = g->device;
7127         tf->command = g->status;
7128         tf->hob_nsect = g->hob_nsect;
7129         tf->hob_lbal = g->hob_lbal;
7130         tf->hob_lbam = g->hob_lbam;
7131         tf->hob_lbah = g->hob_lbah;
7132
7133         return true;
7134 }
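/*
 * Illustrative note: the copy above maps the adapter's good-path ATA
 * register image (struct ipr_ioasa_gata) straight onto libata's result
 * taskfile - the device's status register becomes tf->command and the
 * error register becomes tf->feature, since each pair shares a register
 * on the wire.
 */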
7135
7136 static struct ata_port_operations ipr_sata_ops = {
7137         .phy_reset = ipr_ata_phy_reset,
7138         .hardreset = ipr_sata_reset,
7139         .post_internal_cmd = ipr_ata_post_internal,
7140         .qc_prep = ata_noop_qc_prep,
7141         .qc_defer = ipr_qc_defer,
7142         .qc_issue = ipr_qc_issue,
7143         .qc_fill_rtf = ipr_qc_fill_rtf,
7144         .port_start = ata_sas_port_start,
7145         .port_stop = ata_sas_port_stop
7146 };
7147
7148 static struct ata_port_info sata_port_info = {
7149         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7150                           ATA_FLAG_SAS_HOST,
7151         .pio_mask       = ATA_PIO4_ONLY,
7152         .mwdma_mask     = ATA_MWDMA2,
7153         .udma_mask      = ATA_UDMA6,
7154         .port_ops       = &ipr_sata_ops
7155 };
7156
7157 #ifdef CONFIG_PPC_PSERIES
7158 static const u16 ipr_blocked_processors[] = {
7159         PVR_NORTHSTAR,
7160         PVR_PULSAR,
7161         PVR_POWER4,
7162         PVR_ICESTAR,
7163         PVR_SSTAR,
7164         PVR_POWER4p,
7165         PVR_630,
7166         PVR_630p
7167 };
7168
7169 /**
7170  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7171  * @ioa_cfg:    ioa cfg struct
7172  *
7173  * Adapters that use Gemstone revision < 3.1 do not work reliably on
7174  * certain pSeries hardware. This function determines if the given
7175  * adapter is in one of these configurations or not.
7176  *
7177  * Return value:
7178  *      1 if adapter is not supported / 0 if adapter is supported
7179  **/
7180 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7181 {
7182         int i;
7183
7184         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
7185                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
7186                         if (pvr_version_is(ipr_blocked_processors[i]))
7187                                 return 1;
7188                 }
7189         }
7190         return 0;
7191 }
7192 #else
7193 #define ipr_invalid_adapter(ioa_cfg) 0
7194 #endif
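/*
 * Illustrative note: on kernels built without CONFIG_PPC_PSERIES the check
 * above compiles away to a constant 0, so every adapter is accepted; the
 * PVR table only gates early-revision (< 4) 5702 cards on processors
 * where pvr_version_is() can actually match.
 */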
7195
7196 /**
7197  * ipr_ioa_bringdown_done - IOA bring down completion.
7198  * @ipr_cmd:    ipr command struct
7199  *
7200  * This function processes the completion of an adapter bring down.
7201  * It wakes any reset sleepers.
7202  *
7203  * Return value:
7204  *      IPR_RC_JOB_RETURN
7205  **/
7206 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7207 {
7208         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7209         int i;
7210
7211         ENTER;
7212         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7213                 ipr_trace;
7214                 spin_unlock_irq(ioa_cfg->host->host_lock);
7215                 scsi_unblock_requests(ioa_cfg->host);
7216                 spin_lock_irq(ioa_cfg->host->host_lock);
7217         }
7218
7219         ioa_cfg->in_reset_reload = 0;
7220         ioa_cfg->reset_retries = 0;
7221         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7222                 spin_lock(&ioa_cfg->hrrq[i]._lock);
7223                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7224                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7225         }
7226         wmb();
7227
7228         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7229         wake_up_all(&ioa_cfg->reset_wait_q);
7230         LEAVE;
7231
7232         return IPR_RC_JOB_RETURN;
7233 }
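/*
 * Illustrative note: ioa_is_dead is flipped under each queue's _lock so
 * per-queue readers see a consistent value, and the wmb() above orders
 * those stores before the free-list add and wake_up_all(), so a reset
 * sleeper waking on reset_wait_q should not observe the pre-bringdown
 * state.
 */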
7234
7235 /**
7236  * ipr_ioa_reset_done - IOA reset completion.
7237  * @ipr_cmd:    ipr command struct
7238  *
7239  * This function processes the completion of an adapter reset.
7240  * It schedules any necessary mid-layer add/removes and
7241  * wakes any reset sleepers.
7242  *
7243  * Return value:
7244  *      IPR_RC_JOB_RETURN
7245  **/
7246 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7247 {
7248         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7249         struct ipr_resource_entry *res;
7250         int j;
7251
7252         ENTER;
7253         ioa_cfg->in_reset_reload = 0;
7254         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7255                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7256                 ioa_cfg->hrrq[j].allow_cmds = 1;
7257                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7258         }
7259         wmb();
7260         ioa_cfg->reset_cmd = NULL;
7261         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7262
7263         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7264                 if (res->add_to_ml || res->del_from_ml) {
7265                         ipr_trace;
7266                         break;
7267                 }
7268         }
7269         schedule_work(&ioa_cfg->work_q);
7270
7271         for (j = 0; j < IPR_NUM_HCAMS; j++) {
7272                 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7273                 if (j < IPR_NUM_LOG_HCAMS)
7274                         ipr_send_hcam(ioa_cfg,
7275                                 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7276                                 ioa_cfg->hostrcb[j]);
7277                 else
7278                         ipr_send_hcam(ioa_cfg,
7279                                 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7280                                 ioa_cfg->hostrcb[j]);
7281         }
7282
7283         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7284         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7285
7286         ioa_cfg->reset_retries = 0;
7287         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7288         wake_up_all(&ioa_cfg->reset_wait_q);
7289
7290         spin_unlock(ioa_cfg->host->host_lock);
7291         scsi_unblock_requests(ioa_cfg->host);
7292         spin_lock(ioa_cfg->host->host_lock);
7293
7294         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7295                 scsi_block_requests(ioa_cfg->host);
7296
7297         schedule_work(&ioa_cfg->work_q);
7298         LEAVE;
7299         return IPR_RC_JOB_RETURN;
7300 }
7301
7302 /**
7303  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7304  * @supported_dev:      supported device struct
7305  * @vpids:      vendor product id struct
7306  *
7307  * Return value:
7308  *      none
7309  **/
7310 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7311                                  struct ipr_std_inq_vpids *vpids)
7312 {
7313         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7314         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7315         supported_dev->num_records = 1;
7316         supported_dev->data_length =
7317                 cpu_to_be16(sizeof(struct ipr_supported_device));
7318         supported_dev->reserved = 0;
7319 }
7320
7321 /**
7322  * ipr_set_supported_devs - Send Set Supported Devices for a device
7323  * @ipr_cmd:    ipr command struct
7324  *
7325  * This function sends a Set Supported Devices to the adapter
7326  *
7327  * Return value:
7328  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7329  **/
7330 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7331 {
7332         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7333         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7334         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7335         struct ipr_resource_entry *res = ipr_cmd->u.res;
7336
7337         ipr_cmd->job_step = ipr_ioa_reset_done;
7338
7339         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7340                 if (!ipr_is_scsi_disk(res))
7341                         continue;
7342
7343                 ipr_cmd->u.res = res;
7344                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7345
7346                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7347                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7348                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7349
7350                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7351                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7352                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7353                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7354
7355                 ipr_init_ioadl(ipr_cmd,
7356                                ioa_cfg->vpd_cbs_dma +
7357                                  offsetof(struct ipr_misc_cbs, supp_dev),
7358                                sizeof(struct ipr_supported_device),
7359                                IPR_IOADL_FLAGS_WRITE_LAST);
7360
7361                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7362                            IPR_SET_SUP_DEVICE_TIMEOUT);
7363
7364                 if (!ioa_cfg->sis64)
7365                         ipr_cmd->job_step = ipr_set_supported_devs;
7366                 LEAVE;
7367                 return IPR_RC_JOB_RETURN;
7368         }
7369
7370         LEAVE;
7371         return IPR_RC_JOB_CONTINUE;
7372 }
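/*
 * Illustrative sketch (not part of the original driver): the bringup code
 * above is a cooperative state machine. A stage that fires an adapter
 * command via ipr_do_req() returns IPR_RC_JOB_RETURN and is re-entered
 * through ipr_reset_ioa_job() on completion; a stage with nothing
 * asynchronous to do returns IPR_RC_JOB_CONTINUE so the next
 * ipr_cmd->job_step runs immediately. A minimal driving loop, assuming
 * only that convention, would look like:
 */
#if 0
static void example_run_job_steps(struct ipr_cmnd *ipr_cmd)
{
	int rc = IPR_RC_JOB_CONTINUE;

	/* Step synchronously until a stage issues an async request */
	while (rc == IPR_RC_JOB_CONTINUE)
		rc = ipr_cmd->job_step(ipr_cmd);
}
#endif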
7373
7374 /**
7375  * ipr_get_mode_page - Locate specified mode page
7376  * @mode_pages: mode page buffer
7377  * @page_code:  page code to find
7378  * @len:                minimum required length for mode page
7379  *
7380  * Return value:
7381  *      pointer to mode page / NULL on failure
7382  **/
7383 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7384                                u32 page_code, u32 len)
7385 {
7386         struct ipr_mode_page_hdr *mode_hdr;
7387         u32 page_length;
7388         u32 length;
7389
7390         if (!mode_pages || (mode_pages->hdr.length == 0))
7391                 return NULL;
7392
7393         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7394         mode_hdr = (struct ipr_mode_page_hdr *)
7395                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7396
7397         while (length) {
7398                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7399                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7400                                 return mode_hdr;
7401                         break;
7402                 } else {
7403                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7404                                        mode_hdr->page_length);
7405                         length -= page_length;
7406                         mode_hdr = (struct ipr_mode_page_hdr *)
7407                                 ((unsigned long)mode_hdr + page_length);
7408                 }
7409         }
7410         return NULL;
7411 }
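/*
 * Illustrative example of the length arithmetic above:
 * mode_pages->hdr.length is the MODE SENSE "mode data length", which
 * excludes the length byte itself, so (hdr.length + 1) is the total
 * buffer size. Subtracting the 4-byte mode parameter header and
 * hdr.block_desc_len leaves only the mode page bytes. For instance,
 * hdr.length = 35 with block_desc_len = 8 gives (35 + 1) - 4 - 8 = 24
 * bytes of pages to walk.
 */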
7412
7413 /**
7414  * ipr_check_term_power - Check for term power errors
7415  * @ioa_cfg:    ioa config struct
7416  * @mode_pages: IOAFP mode pages buffer
7417  *
7418  * Check the IOAFP's mode page 28 for term power errors
7419  *
7420  * Return value:
7421  *      nothing
7422  **/
7423 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7424                                  struct ipr_mode_pages *mode_pages)
7425 {
7426         int i;
7427         int entry_length;
7428         struct ipr_dev_bus_entry *bus;
7429         struct ipr_mode_page28 *mode_page;
7430
7431         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7432                                       sizeof(struct ipr_mode_page28));
7433
7434         entry_length = mode_page->entry_length;
7435
7436         bus = mode_page->bus;
7437
7438         for (i = 0; i < mode_page->num_entries; i++) {
7439                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7440                         dev_err(&ioa_cfg->pdev->dev,
7441                                 "Term power is absent on scsi bus %d\n",
7442                                 bus->res_addr.bus);
7443                 }
7444
7445                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7446         }
7447 }
7448
7449 /**
7450  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7451  * @ioa_cfg:    ioa config struct
7452  *
7453  * Looks through the config table checking for SES devices. If
7454  * the SES device is in the SES table indicating a maximum SCSI
7455  * bus speed, the speed is limited for the bus.
7456  *
7457  * Return value:
7458  *      none
7459  **/
7460 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7461 {
7462         u32 max_xfer_rate;
7463         int i;
7464
7465         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7466                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7467                                                        ioa_cfg->bus_attr[i].bus_width);
7468
7469                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7470                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7471         }
7472 }
7473
7474 /**
7475  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7476  * @ioa_cfg:    ioa config struct
7477  * @mode_pages: mode page 28 buffer
7478  *
7479  * Updates mode page 28 based on driver configuration
7480  *
7481  * Return value:
7482  *      none
7483  **/
7484 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7485                                           struct ipr_mode_pages *mode_pages)
7486 {
7487         int i, entry_length;
7488         struct ipr_dev_bus_entry *bus;
7489         struct ipr_bus_attributes *bus_attr;
7490         struct ipr_mode_page28 *mode_page;
7491
7492         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7493                                       sizeof(struct ipr_mode_page28));
7494
7495         entry_length = mode_page->entry_length;
7496
7497         /* Loop for each device bus entry */
7498         for (i = 0, bus = mode_page->bus;
7499              i < mode_page->num_entries;
7500              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7501                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7502                         dev_err(&ioa_cfg->pdev->dev,
7503                                 "Invalid resource address reported: 0x%08X\n",
7504                                 IPR_GET_PHYS_LOC(bus->res_addr));
7505                         continue;
7506                 }
7507
7508                 bus_attr = &ioa_cfg->bus_attr[i];
7509                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7510                 bus->bus_width = bus_attr->bus_width;
7511                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7512                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7513                 if (bus_attr->qas_enabled)
7514                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7515                 else
7516                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7517         }
7518 }
7519
7520 /**
7521  * ipr_build_mode_select - Build a mode select command
7522  * @ipr_cmd:    ipr command struct
7523  * @res_handle: resource handle to send command to
7524  * @parm:               Byte 1 of Mode Select command
7525  * @dma_addr:   DMA buffer address
7526  * @xfer_len:   data transfer length
7527  *
7528  * Return value:
7529  *      none
7530  **/
7531 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7532                                   __be32 res_handle, u8 parm,
7533                                   dma_addr_t dma_addr, u8 xfer_len)
7534 {
7535         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7536
7537         ioarcb->res_handle = res_handle;
7538         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7539         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7540         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7541         ioarcb->cmd_pkt.cdb[1] = parm;
7542         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7543
7544         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7545 }
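/*
 * Illustrative note: the resulting 6-byte MODE SELECT CDB is laid out as
 * { MODE_SELECT, parm, 0, 0, xfer_len, 0 }. The callers below pass
 * parm = 0x11, which sets the PF and SP bits (page format, save pages),
 * and a single write IOADL element points the adapter at the DMA buffer
 * holding the modified mode pages.
 */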
7546
7547 /**
7548  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7549  * @ipr_cmd:    ipr command struct
7550  *
7551  * This function sets up the SCSI bus attributes and sends
7552  * a Mode Select for Page 28 to activate them.
7553  *
7554  * Return value:
7555  *      IPR_RC_JOB_RETURN
7556  **/
7557 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7558 {
7559         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7560         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7561         int length;
7562
7563         ENTER;
7564         ipr_scsi_bus_speed_limit(ioa_cfg);
7565         ipr_check_term_power(ioa_cfg, mode_pages);
7566         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7567         length = mode_pages->hdr.length + 1;
7568         mode_pages->hdr.length = 0;
7569
7570         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7571                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7572                               length);
7573
7574         ipr_cmd->job_step = ipr_set_supported_devs;
7575         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7576                                     struct ipr_resource_entry, queue);
7577         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7578
7579         LEAVE;
7580         return IPR_RC_JOB_RETURN;
7581 }
7582
7583 /**
7584  * ipr_build_mode_sense - Builds a mode sense command
7585  * @ipr_cmd:    ipr command struct
7586  * @res_handle:  resource handle to send command to
7587  * @parm:               Byte 2 of mode sense command
7588  * @dma_addr:   DMA address of mode sense buffer
7589  * @xfer_len:   Size of DMA buffer
7590  *
7591  * Return value:
7592  *      none
7593  **/
7594 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7595                                  __be32 res_handle,
7596                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7597 {
7598         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7599
7600         ioarcb->res_handle = res_handle;
7601         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7602         ioarcb->cmd_pkt.cdb[2] = parm;
7603         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7604         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7605
7606         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7607 }
7608
7609 /**
7610  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7611  * @ipr_cmd:    ipr command struct
7612  *
7613  * This function handles the failure of an IOA bringup command.
7614  *
7615  * Return value:
7616  *      IPR_RC_JOB_RETURN
7617  **/
7618 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7619 {
7620         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7621         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7622
7623         dev_err(&ioa_cfg->pdev->dev,
7624                 "0x%02X failed with IOASC: 0x%08X\n",
7625                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7626
7627         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7628         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7629         return IPR_RC_JOB_RETURN;
7630 }
7631
7632 /**
7633  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7634  * @ipr_cmd:    ipr command struct
7635  *
7636  * This function handles the failure of a Mode Sense to the IOAFP.
7637  * Some adapters do not handle all mode pages.
7638  *
7639  * Return value:
7640  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7641  **/
7642 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7643 {
7644         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7645         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7646
7647         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7648                 ipr_cmd->job_step = ipr_set_supported_devs;
7649                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7650                                             struct ipr_resource_entry, queue);
7651                 return IPR_RC_JOB_CONTINUE;
7652         }
7653
7654         return ipr_reset_cmd_failed(ipr_cmd);
7655 }
7656
7657 /**
7658  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7659  * @ipr_cmd:    ipr command struct
7660  *
7661  * This function sends a Page 28 mode sense to the IOA to
7662  * retrieve SCSI bus attributes.
7663  *
7664  * Return value:
7665  *      IPR_RC_JOB_RETURN
7666  **/
7667 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7668 {
7669         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7670
7671         ENTER;
7672         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7673                              0x28, ioa_cfg->vpd_cbs_dma +
7674                              offsetof(struct ipr_misc_cbs, mode_pages),
7675                              sizeof(struct ipr_mode_pages));
7676
7677         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7678         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7679
7680         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7681
7682         LEAVE;
7683         return IPR_RC_JOB_RETURN;
7684 }
7685
7686 /**
7687  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7688  * @ipr_cmd:    ipr command struct
7689  *
7690  * This function enables dual IOA RAID support if possible.
7691  *
7692  * Return value:
7693  *      IPR_RC_JOB_RETURN
7694  **/
7695 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7696 {
7697         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7698         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7699         struct ipr_mode_page24 *mode_page;
7700         int length;
7701
7702         ENTER;
7703         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7704                                       sizeof(struct ipr_mode_page24));
7705
7706         if (mode_page)
7707                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7708
7709         length = mode_pages->hdr.length + 1;
7710         mode_pages->hdr.length = 0;
7711
7712         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7713                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7714                               length);
7715
7716         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7717         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7718
7719         LEAVE;
7720         return IPR_RC_JOB_RETURN;
7721 }
7722
7723 /**
7724  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7725  * @ipr_cmd:    ipr command struct
7726  *
7727  * This function handles the failure of a Mode Sense to the IOAFP.
7728  * Some adapters do not handle all mode pages.
7729  *
7730  * Return value:
7731  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7732  **/
7733 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7734 {
7735         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7736
7737         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7738                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7739                 return IPR_RC_JOB_CONTINUE;
7740         }
7741
7742         return ipr_reset_cmd_failed(ipr_cmd);
7743 }
7744
7745 /**
7746  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7747  * @ipr_cmd:    ipr command struct
7748  *
7749  * This function sends a mode sense to the IOA to retrieve
7750  * the IOA Advanced Function Control mode page.
7751  *
7752  * Return value:
7753  *      IPR_RC_JOB_RETURN
7754  **/
7755 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7756 {
7757         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7758
7759         ENTER;
7760         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7761                              0x24, ioa_cfg->vpd_cbs_dma +
7762                              offsetof(struct ipr_misc_cbs, mode_pages),
7763                              sizeof(struct ipr_mode_pages));
7764
7765         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7766         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7767
7768         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7769
7770         LEAVE;
7771         return IPR_RC_JOB_RETURN;
7772 }
7773
7774 /**
7775  * ipr_init_res_table - Initialize the resource table
7776  * @ipr_cmd:    ipr command struct
7777  *
7778  * This function looks through the existing resource table, comparing
7779  * it with the config table. This function will take care of old/new
7780  * devices and schedule adding/removing them from the mid-layer
7781  * as appropriate.
7782  *
7783  * Return value:
7784  *      IPR_RC_JOB_CONTINUE
7785  **/
7786 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7787 {
7788         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7789         struct ipr_resource_entry *res, *temp;
7790         struct ipr_config_table_entry_wrapper cfgtew;
7791         int entries, found, flag, i;
7792         LIST_HEAD(old_res);
7793
7794         ENTER;
7795         if (ioa_cfg->sis64)
7796                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7797         else
7798                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7799
7800         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7801                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7802
7803         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7804                 list_move_tail(&res->queue, &old_res);
7805
7806         if (ioa_cfg->sis64)
7807                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7808         else
7809                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7810
7811         for (i = 0; i < entries; i++) {
7812                 if (ioa_cfg->sis64)
7813                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7814                 else
7815                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7816                 found = 0;
7817
7818                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7819                         if (ipr_is_same_device(res, &cfgtew)) {
7820                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7821                                 found = 1;
7822                                 break;
7823                         }
7824                 }
7825
7826                 if (!found) {
7827                         if (list_empty(&ioa_cfg->free_res_q)) {
7828                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7829                                 break;
7830                         }
7831
7832                         found = 1;
7833                         res = list_entry(ioa_cfg->free_res_q.next,
7834                                          struct ipr_resource_entry, queue);
7835                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7836                         ipr_init_res_entry(res, &cfgtew);
7837                         res->add_to_ml = 1;
7838                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7839                         res->sdev->allow_restart = 1;
7840
7841                 if (found)
7842                         ipr_update_res_entry(res, &cfgtew);
7843         }
7844
7845         list_for_each_entry_safe(res, temp, &old_res, queue) {
7846                 if (res->sdev) {
7847                         res->del_from_ml = 1;
7848                         res->res_handle = IPR_INVALID_RES_HANDLE;
7849                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7850                 }
7851         }
7852
7853         list_for_each_entry_safe(res, temp, &old_res, queue) {
7854                 ipr_clear_res_target(res);
7855                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7856         }
7857
7858         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7859                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7860         else
7861                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7862
7863         LEAVE;
7864         return IPR_RC_JOB_CONTINUE;
7865 }
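/*
 * Illustrative note: the reconciliation above is a three-pass diff. Every
 * known resource is first parked on old_res; each config-table entry then
 * either reclaims its match or allocates a fresh entry flagged add_to_ml.
 * Whatever remains on old_res afterwards has vanished from the adapter:
 * entries with an attached sdev are flagged del_from_ml for the mid-layer
 * worker, and the rest are returned straight to free_res_q.
 */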
7866
7867 /**
7868  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7869  * @ipr_cmd:    ipr command struct
7870  *
7871  * This function sends a Query IOA Configuration command
7872  * to the adapter to retrieve the IOA configuration table.
7873  *
7874  * Return value:
7875  *      IPR_RC_JOB_RETURN
7876  **/
7877 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7878 {
7879         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7880         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7881         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7882         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7883
7884         ENTER;
7885         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7886                 ioa_cfg->dual_raid = 1;
7887         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7888                  ucode_vpd->major_release, ucode_vpd->card_type,
7889                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7890         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7891         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7892
7893         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7894         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7895         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7896         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7897
7898         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7899                        IPR_IOADL_FLAGS_READ_LAST);
7900
7901         ipr_cmd->job_step = ipr_init_res_table;
7902
7903         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7904
7905         LEAVE;
7906         return IPR_RC_JOB_RETURN;
7907 }
7908
7909 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7910 {
7911         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7912
7913         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7914                 return IPR_RC_JOB_CONTINUE;
7915
7916         return ipr_reset_cmd_failed(ipr_cmd);
7917 }
7918
7919 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7920                                          __be32 res_handle, u8 sa_code)
7921 {
7922         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7923
7924         ioarcb->res_handle = res_handle;
7925         ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7926         ioarcb->cmd_pkt.cdb[1] = sa_code;
7927         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7928 }
7929
7930 /**
7931  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service action
7932  * @ipr_cmd:    ipr command struct
7933  *
7934  * Return value:
7935  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7936  **/
7937 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7938 {
7939         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7940         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7941         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7942
7943         ENTER;
7944
7945         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7946
7947         if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7948                 ipr_build_ioa_service_action(ipr_cmd,
7949                                              cpu_to_be32(IPR_IOA_RES_HANDLE),
7950                                              IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7951
7952                 ioarcb->cmd_pkt.cdb[2] = 0x40;
7953
7954                 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7955                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7956                            IPR_SET_SUP_DEVICE_TIMEOUT);
7957
7958                 LEAVE;
7959                 return IPR_RC_JOB_RETURN;
7960         }
7961
7962         LEAVE;
7963         return IPR_RC_JOB_CONTINUE;
7964 }
7965
7966 /**
7967  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7968  * @ipr_cmd:    ipr command struct
7969  * @flags/@page/@dma_addr/@xfer_len: inquiry CDB fields and response buffer
7970  * This utility function sends an inquiry to the adapter.
7971  *
7972  * Return value:
7973  *      none
7974  **/
7975 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7976                               dma_addr_t dma_addr, u8 xfer_len)
7977 {
7978         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7979
7980         ENTER;
7981         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7982         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7983
7984         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7985         ioarcb->cmd_pkt.cdb[1] = flags;
7986         ioarcb->cmd_pkt.cdb[2] = page;
7987         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7988
7989         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7990
7991         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7992         LEAVE;
7993 }
7994
7995 /**
7996  * ipr_inquiry_page_supported - Is the given inquiry page supported
7997  * @page0:              inquiry page 0 buffer
7998  * @page:               page code.
7999  *
8000  * This function determines if the specified inquiry page is supported.
8001  *
8002  * Return value:
8003  *      1 if page is supported / 0 if not
8004  **/
8005 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8006 {
8007         int i;
8008
8009         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8010                 if (page0->page[i] == page)
8011                         return 1;
8012
8013         return 0;
8014 }
8015
8016 /**
8017  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8018  * @ipr_cmd:    ipr command struct
8019  *
8020  * This function sends a Page 0xC4 inquiry to the adapter
8021  * to retrieve software VPD information.
8022  *
8023  * Return value:
8024  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8025  **/
8026 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8027 {
8028         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8029         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8030         struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8031
8032         ENTER;
8033         ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
8034         memset(pageC4, 0, sizeof(*pageC4));
8035
8036         if (ipr_inquiry_page_supported(page0, 0xC4)) {
8037                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8038                                   (ioa_cfg->vpd_cbs_dma
8039                                    + offsetof(struct ipr_misc_cbs,
8040                                               pageC4_data)),
8041                                   sizeof(struct ipr_inquiry_pageC4));
8042                 return IPR_RC_JOB_RETURN;
8043         }
8044
8045         LEAVE;
8046         return IPR_RC_JOB_CONTINUE;
8047 }
8048
8049 /**
8050  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8051  * @ipr_cmd:    ipr command struct
8052  *
8053  * This function sends a Page 0xD0 inquiry to the adapter
8054  * to retrieve adapter capabilities.
8055  *
8056  * Return value:
8057  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8058  **/
8059 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8060 {
8061         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8062         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8063         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8064
8065         ENTER;
8066         ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
8067         memset(cap, 0, sizeof(*cap));
8068
8069         if (ipr_inquiry_page_supported(page0, 0xD0)) {
8070                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8071                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8072                                   sizeof(struct ipr_inquiry_cap));
8073                 return IPR_RC_JOB_RETURN;
8074         }
8075
8076         LEAVE;
8077         return IPR_RC_JOB_CONTINUE;
8078 }
8079
8080 /**
8081  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8082  * @ipr_cmd:    ipr command struct
8083  *
8084  * This function sends a Page 3 inquiry to the adapter
8085  * to retrieve software VPD information.
8086  *
8087  * Return value:
8088  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8089  **/
8090 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8091 {
8092         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8093
8094         ENTER;
8095
8096         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
8097
8098         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8099                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8100                           sizeof(struct ipr_inquiry_page3));
8101
8102         LEAVE;
8103         return IPR_RC_JOB_RETURN;
8104 }
8105
8106 /**
8107  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8108  * @ipr_cmd:    ipr command struct
8109  *
8110  * This function sends a Page 0 inquiry to the adapter
8111  * to retrieve supported inquiry pages.
8112  *
8113  * Return value:
8114  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8115  **/
8116 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8117 {
8118         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8119         char type[5];
8120
8121         ENTER;
8122
8123         /* Grab the type out of the VPD and store it away */
8124         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8125         type[4] = '\0';
8126         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8127
8128         if (ipr_invalid_adapter(ioa_cfg)) {
8129                 dev_err(&ioa_cfg->pdev->dev,
8130                         "Adapter not supported in this hardware configuration.\n");
8131
8132                 if (!ipr_testmode) {
8133                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8134                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8135                         list_add_tail(&ipr_cmd->queue,
8136                                         &ioa_cfg->hrrq->hrrq_free_q);
8137                         return IPR_RC_JOB_RETURN;
8138                 }
8139         }
8140
8141         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
8142
8143         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8144                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8145                           sizeof(struct ipr_inquiry_page0));
8146
8147         LEAVE;
8148         return IPR_RC_JOB_RETURN;
8149 }
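/*
 * Illustrative note: the adapter "type" computed above is literally the
 * first four characters of the product ID from the standard inquiry VPD
 * parsed as hex; e.g. a product_id beginning "5702" yields
 * ioa_cfg->type == 0x5702, which is what ipr_invalid_adapter() compares
 * against.
 */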
8150
8151 /**
8152  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8153  * @ipr_cmd:    ipr command struct
8154  *
8155  * This function sends a standard inquiry to the adapter.
8156  *
8157  * Return value:
8158  *      IPR_RC_JOB_RETURN
8159  **/
8160 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8161 {
8162         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8163
8164         ENTER;
8165         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
8166
8167         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8168                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8169                           sizeof(struct ipr_ioa_vpd));
8170
8171         LEAVE;
8172         return IPR_RC_JOB_RETURN;
8173 }
8174
8175 /**
8176  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8177  * @ipr_cmd:    ipr command struct
8178  *
8179  * This function sends an Identify Host Request Response Queue
8180  * command to establish the HRRQ with the adapter.
8181  *
8182  * Return value:
8183  *      IPR_RC_JOB_RETURN
8184  **/
8185 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
8186 {
8187         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8188         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
8189         struct ipr_hrr_queue *hrrq;
8190
8191         ENTER;
8192         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
8193         if (ioa_cfg->identify_hrrq_index == 0)
8194                 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
8195
8196         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8197                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
8198
8199                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8200                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8201
8202                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8203                 if (ioa_cfg->sis64)
8204                         ioarcb->cmd_pkt.cdb[1] = 0x1;
8205
8206                 if (ioa_cfg->nvectors == 1)
8207                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8208                 else
8209                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8210
8211                 ioarcb->cmd_pkt.cdb[2] =
8212                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8213                 ioarcb->cmd_pkt.cdb[3] =
8214                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8215                 ioarcb->cmd_pkt.cdb[4] =
8216                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8217                 ioarcb->cmd_pkt.cdb[5] =
8218                         ((u64) hrrq->host_rrq_dma) & 0xff;
8219                 ioarcb->cmd_pkt.cdb[7] =
8220                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8221                 ioarcb->cmd_pkt.cdb[8] =
8222                         (sizeof(u32) * hrrq->size) & 0xff;
8223
8224                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8225                         ioarcb->cmd_pkt.cdb[9] =
8226                                         ioa_cfg->identify_hrrq_index;
8227
8228                 if (ioa_cfg->sis64) {
8229                         ioarcb->cmd_pkt.cdb[10] =
8230                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8231                         ioarcb->cmd_pkt.cdb[11] =
8232                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8233                         ioarcb->cmd_pkt.cdb[12] =
8234                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8235                         ioarcb->cmd_pkt.cdb[13] =
8236                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8237                 }
8238
8239                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8240                         ioarcb->cmd_pkt.cdb[14] =
8241                                         ioa_cfg->identify_hrrq_index;
8242
8243                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8244                            IPR_INTERNAL_TIMEOUT);
8245
8246                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8247                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8248
8249                 LEAVE;
8250                 return IPR_RC_JOB_RETURN;
8251         }
8252
8253         LEAVE;
8254         return IPR_RC_JOB_CONTINUE;
8255 }
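/*
 * Illustrative sketch: the Identify HRRQ CDB above carries the queue's
 * DMA address as individual big-endian bytes - the low 32 bits in CDB
 * bytes 2-5 and, on sis64, the high 32 bits in bytes 10-13. A
 * hypothetical helper doing the same split for a u64 address:
 */
#if 0
static void example_encode_hrrq_addr(u8 *cdb, u64 addr)
{
	cdb[2] = (addr >> 24) & 0xff;	/* bits 31..24 */
	cdb[3] = (addr >> 16) & 0xff;
	cdb[4] = (addr >> 8) & 0xff;
	cdb[5] = addr & 0xff;		/* bits 7..0 */
	cdb[10] = (addr >> 56) & 0xff;	/* bits 63..56, sis64 only */
	cdb[11] = (addr >> 48) & 0xff;
	cdb[12] = (addr >> 40) & 0xff;
	cdb[13] = (addr >> 32) & 0xff;
}
#endif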
8256
8257 /**
8258  * ipr_reset_timer_done - Adapter reset timer function
8259  * @ipr_cmd:    ipr command struct
8260  *
8261  * Description: This function is used in adapter reset processing
8262  * for timing events. If the reset_cmd pointer in the IOA
8263  * config struct is not this adapter's we are doing nested
8264  * resets and fail_all_ops will take care of freeing the
8265  * command block.
8266  *
8267  * Return value:
8268  *      none
8269  **/
8270 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8271 {
8272         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8273         unsigned long lock_flags = 0;
8274
8275         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8276
8277         if (ioa_cfg->reset_cmd == ipr_cmd) {
8278                 list_del(&ipr_cmd->queue);
8279                 ipr_cmd->done(ipr_cmd);
8280         }
8281
8282         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8283 }
8284
8285 /**
8286  * ipr_reset_start_timer - Start a timer for adapter reset job
8287  * @ipr_cmd:    ipr command struct
8288  * @timeout:    timeout value
8289  *
8290  * Description: This function is used in adapter reset processing
8291  * for timing events. If the reset_cmd pointer in the IOA
8292  * config struct is not this adapter's we are doing nested
8293  * resets and fail_all_ops will take care of freeing the
8294  * command block.
8295  *
8296  * Return value:
8297  *      none
8298  **/
8299 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8300                                   unsigned long timeout)
8301 {
8302
8303         ENTER;
8304         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8305         ipr_cmd->done = ipr_reset_ioa_job;
8306
8307         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8308         ipr_cmd->timer.expires = jiffies + timeout;
8309         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8310         add_timer(&ipr_cmd->timer);
8311 }
8312
8313 /**
8314  * ipr_init_ioa_mem - Initialize ioa_cfg control block
8315  * @ioa_cfg:    ioa cfg struct
8316  *
8317  * Return value:
8318  *      nothing
8319  **/
8320 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8321 {
8322         struct ipr_hrr_queue *hrrq;
8323
8324         for_each_hrrq(hrrq, ioa_cfg) {
8325                 spin_lock(&hrrq->_lock);
8326                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8327
8328                 /* Initialize Host RRQ pointers */
8329                 hrrq->hrrq_start = hrrq->host_rrq;
8330                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8331                 hrrq->hrrq_curr = hrrq->hrrq_start;
8332                 hrrq->toggle_bit = 1;
8333                 spin_unlock(&hrrq->_lock);
8334         }
8335         wmb();
8336
8337         ioa_cfg->identify_hrrq_index = 0;
8338         if (ioa_cfg->hrrq_num == 1)
8339                 atomic_set(&ioa_cfg->hrrq_index, 0);
8340         else
8341                 atomic_set(&ioa_cfg->hrrq_index, 1);
8342
8343         /* Zero out config table */
8344         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8345 }
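/*
 * Illustrative note: the host RRQ is a circular buffer of response
 * descriptors. The toggle_bit initialized above flips each time the
 * adapter wraps the queue, which is how the host distinguishes freshly
 * written entries from stale ones left over from the previous pass
 * without exchanging a separate producer index.
 */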
8346
8347 /**
8348  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8349  * @ipr_cmd:    ipr command struct
8350  *
8351  * Return value:
8352  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8353  **/
8354 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8355 {
8356         unsigned long stage, stage_time;
8357         u32 feedback;
8358         volatile u32 int_reg;
8359         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8360         u64 maskval = 0;
8361
8362         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8363         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8364         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8365
8366         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8367
8368         /* sanity check the stage_time value */
8369         if (stage_time == 0)
8370                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8371         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8372                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8373         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8374                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8375
8376         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8377                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8378                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8379                 stage_time = ioa_cfg->transop_timeout;
8380                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8381         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8382                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8383                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8384                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8385                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8386                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8387                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8388                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8389                         return IPR_RC_JOB_CONTINUE;
8390                 }
8391         }
8392
8393         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8394         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8395         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8396         ipr_cmd->done = ipr_reset_ioa_job;
8397         add_timer(&ipr_cmd->timer);
8398
8399         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8400
8401         return IPR_RC_JOB_RETURN;
8402 }
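/*
 * Illustrative note: the init feedback register is polled as a pair of
 * bit-fields - the IPL stage (which init phase the adapter is in) and a
 * per-stage time budget in seconds. The clamping above keeps a bogus
 * stage time from arming an absurdly short or long ipr_oper_timeout
 * timer, and reaching IPR_IPL_INIT_STAGE_TRANSOP with the transition
 * interrupt already latched skips the timer entirely and continues the
 * reset job.
 */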
8403
8404 /**
8405  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8406  * @ipr_cmd:    ipr command struct
8407  *
8408  * This function reinitializes some control blocks and
8409  * enables destructive diagnostics on the adapter.
8410  *
8411  * Return value:
8412  *      IPR_RC_JOB_RETURN
8413  **/
8414 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8415 {
8416         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8417         volatile u32 int_reg;
8418         volatile u64 maskval;
8419         int i;
8420
8421         ENTER;
8422         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8423         ipr_init_ioa_mem(ioa_cfg);
8424
8425         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8426                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8427                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8428                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8429         }
8430         wmb();
8431         if (ioa_cfg->sis64) {
8432                 /* Set the adapter to the correct endian mode. */
8433                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8434                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8435         }
8436
8437         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8438
8439         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8440                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8441                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8442                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8443                 return IPR_RC_JOB_CONTINUE;
8444         }
8445
8446         /* Enable destructive diagnostics on IOA */
8447         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8448
8449         if (ioa_cfg->sis64) {
8450                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8451                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8452                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8453         } else
8454                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8455
8456         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8457
8458         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8459
8460         if (ioa_cfg->sis64) {
8461                 ipr_cmd->job_step = ipr_reset_next_stage;
8462                 return IPR_RC_JOB_CONTINUE;
8463         }
8464
8465         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8466         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8467         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8468         ipr_cmd->done = ipr_reset_ioa_job;
8469         add_timer(&ipr_cmd->timer);
8470         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8471
8472         LEAVE;
8473         return IPR_RC_JOB_RETURN;
8474 }
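
/*
 * Editor's note: throughout ipr_reset_enable_ioa() above, each writel() to
 * a mask or doorbell register is followed by a readl() whose value is
 * otherwise unused. PCI memory writes are posted, so the read-back is what
 * forces the write out to the adapter before the next step runs. Minimal
 * form of the idiom (register name hypothetical):
 *
 *      writel(val, regs->mask_reg);    // posted MMIO write
 *      (void) readl(regs->mask_reg);   // read-back flushes it to the device
 */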
8475
8476 /**
8477  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8478  * @ipr_cmd:    ipr command struct
8479  *
8480  * This function is invoked when an adapter dump has run out
8481  * of processing time.
8482  *
8483  * Return value:
8484  *      IPR_RC_JOB_CONTINUE
8485  **/
8486 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8487 {
8488         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8489
8490         if (ioa_cfg->sdt_state == GET_DUMP)
8491                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8492         else if (ioa_cfg->sdt_state == READ_DUMP)
8493                 ioa_cfg->sdt_state = ABORT_DUMP;
8494
8495         ioa_cfg->dump_timeout = 1;
8496         ipr_cmd->job_step = ipr_reset_alert;
8497
8498         return IPR_RC_JOB_CONTINUE;
8499 }
8500
8501 /**
8502  * ipr_unit_check_no_data - Log a unit check/no data error log
8503  * @ioa_cfg:            ioa config struct
8504  *
8505  * Logs an error indicating the adapter unit checked, but for some
8506  * reason, we were unable to fetch the unit check buffer.
8507  *
8508  * Return value:
8509  *      nothing
8510  **/
8511 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8512 {
8513         ioa_cfg->errors_logged++;
8514         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8515 }
8516
8517 /**
8518  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8519  * @ioa_cfg:            ioa config struct
8520  *
8521  * Fetches the unit check buffer from the adapter by clocking the data
8522  * through the mailbox register.
8523  *
8524  * Return value:
8525  *      nothing
8526  **/
8527 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8528 {
8529         unsigned long mailbox;
8530         struct ipr_hostrcb *hostrcb;
8531         struct ipr_uc_sdt sdt;
8532         int rc, length;
8533         u32 ioasc;
8534
8535         mailbox = readl(ioa_cfg->ioa_mailbox);
8536
8537         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8538                 ipr_unit_check_no_data(ioa_cfg);
8539                 return;
8540         }
8541
8542         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8543         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8544                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8545
8546         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8547             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8548             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8549                 ipr_unit_check_no_data(ioa_cfg);
8550                 return;
8551         }
8552
8553         /* Find length of the first sdt entry (UC buffer) */
8554         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8555                 length = be32_to_cpu(sdt.entry[0].end_token);
8556         else
8557                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8558                           be32_to_cpu(sdt.entry[0].start_token)) &
8559                           IPR_FMT2_MBX_ADDR_MASK;
8560
8561         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8562                              struct ipr_hostrcb, queue);
8563         list_del_init(&hostrcb->queue);
8564         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8565
8566         rc = ipr_get_ldump_data_section(ioa_cfg,
8567                                         be32_to_cpu(sdt.entry[0].start_token),
8568                                         (__be32 *)&hostrcb->hcam,
8569                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8570
8571         if (!rc) {
8572                 ipr_handle_log_data(ioa_cfg, hostrcb);
8573                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8574                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8575                     ioa_cfg->sdt_state == GET_DUMP)
8576                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8577         } else
8578                 ipr_unit_check_no_data(ioa_cfg);
8579
8580         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8581 }
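
/*
 * Editor's sketch: the two SDT formats handled above encode the unit check
 * buffer length differently. FMT3 stores the byte length directly in
 * end_token; FMT2 stores start/end tokens, and the length is their masked
 * difference. Standalone illustration, with a hypothetical mask value in
 * place of the real IPR_FMT2_MBX_ADDR_MASK:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ADDR_MASK 0x0fffffffu      /* hypothetical address-bits mask */

static uint32_t fmt2_length(uint32_t start_token, uint32_t end_token)
{
        /* Token delta with the non-address bits masked off. */
        return (end_token - start_token) & DEMO_ADDR_MASK;
}

int main(void)
{
        printf("FMT2 UC buffer length: %u bytes\n",
               fmt2_length(0x10001000u, 0x10003000u));
        return 0;
}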
8582
8583 /**
8584  * ipr_reset_get_unit_check_job - Retrieve the unit check buffer.
8585  * @ipr_cmd:    ipr command struct
8586  *
8587  * Description: This function retrieves the unit check buffer from the adapter.
8588  *
8589  * Return value:
8590  *      IPR_RC_JOB_RETURN
8591  **/
8592 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8593 {
8594         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8595
8596         ENTER;
8597         ioa_cfg->ioa_unit_checked = 0;
8598         ipr_get_unit_check_buffer(ioa_cfg);
8599         ipr_cmd->job_step = ipr_reset_alert;
8600         ipr_reset_start_timer(ipr_cmd, 0);
8601
8602         LEAVE;
8603         return IPR_RC_JOB_RETURN;
8604 }
8605
8606 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8607 {
8608         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8609
8610         ENTER;
8611
8612         if (ioa_cfg->sdt_state != GET_DUMP)
8613                 return IPR_RC_JOB_RETURN;
8614
8615         if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8616             (readl(ioa_cfg->regs.sense_interrupt_reg) &
8617              IPR_PCII_MAILBOX_STABLE)) {
8618
8619                 if (!ipr_cmd->u.time_left)
8620                         dev_err(&ioa_cfg->pdev->dev,
8621                                 "Timed out waiting for Mailbox register.\n");
8622
8623                 ioa_cfg->sdt_state = READ_DUMP;
8624                 ioa_cfg->dump_timeout = 0;
8625                 if (ioa_cfg->sis64)
8626                         ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8627                 else
8628                         ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8629                 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8630                 schedule_work(&ioa_cfg->work_q);
8631
8632         } else {
8633                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8634                 ipr_reset_start_timer(ipr_cmd,
8635                                       IPR_CHECK_FOR_RESET_TIMEOUT);
8636         }
8637
8638         LEAVE;
8639         return IPR_RC_JOB_RETURN;
8640 }
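
/*
 * Editor's sketch: ipr_dump_mailbox_wait() is one instance of the driver's
 * poll-with-budget idiom: test a readiness condition; if it is not met,
 * subtract the poll interval from a time budget and re-arm the timer; once
 * the budget is exhausted, log a timeout and proceed anyway. A synchronous,
 * standalone rendering of that control flow (names hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>

static bool poll_until(bool (*ready)(void), int budget, int interval)
{
        while (!ready() && budget > 0)
                budget -= interval;     /* stands in for re-arming a timer */
        return budget > 0;              /* false: timed out */
}

static bool mailbox_stable(void)
{
        return false;                   /* simulate an adapter that never settles */
}

int main(void)
{
        if (!poll_until(mailbox_stable, 100, 10))
                puts("Timed out waiting for Mailbox register.");
        return 0;
}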
8641
8642 /**
8643  * ipr_reset_restore_cfg_space - Restore PCI config space.
8644  * @ipr_cmd:    ipr command struct
8645  *
8646  * Description: This function restores the saved PCI config space of
8647  * the adapter, fails all outstanding ops back to the callers, and
8648  * fetches the dump/unit check if applicable to this reset.
8649  *
8650  * Return value:
8651  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8652  **/
8653 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8654 {
8655         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8656         u32 int_reg;
8657
8658         ENTER;
8659         ioa_cfg->pdev->state_saved = true;
8660         pci_restore_state(ioa_cfg->pdev);
8661
8662         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8663                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8664                 return IPR_RC_JOB_CONTINUE;
8665         }
8666
8667         ipr_fail_all_ops(ioa_cfg);
8668
8669         if (ioa_cfg->sis64) {
8670                 /* Set the adapter to the correct endian mode. */
8671                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8672                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8673         }
8674
8675         if (ioa_cfg->ioa_unit_checked) {
8676                 if (ioa_cfg->sis64) {
8677                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8678                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8679                         return IPR_RC_JOB_RETURN;
8680                 } else {
8681                         ioa_cfg->ioa_unit_checked = 0;
8682                         ipr_get_unit_check_buffer(ioa_cfg);
8683                         ipr_cmd->job_step = ipr_reset_alert;
8684                         ipr_reset_start_timer(ipr_cmd, 0);
8685                         return IPR_RC_JOB_RETURN;
8686                 }
8687         }
8688
8689         if (ioa_cfg->in_ioa_bringdown) {
8690                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8691         } else if (ioa_cfg->sdt_state == GET_DUMP) {
8692                 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8693                 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8694         } else {
8695                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8696         }
8697
8698         LEAVE;
8699         return IPR_RC_JOB_CONTINUE;
8700 }
8701
8702 /**
8703  * ipr_reset_bist_done - BIST has completed on the adapter.
8704  * @ipr_cmd:    ipr command struct
8705  *
8706  * Description: Unblock config space and resume the reset process.
8707  *
8708  * Return value:
8709  *      IPR_RC_JOB_CONTINUE
8710  **/
8711 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8712 {
8713         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8714
8715         ENTER;
8716         if (ioa_cfg->cfg_locked)
8717                 pci_cfg_access_unlock(ioa_cfg->pdev);
8718         ioa_cfg->cfg_locked = 0;
8719         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8720         LEAVE;
8721         return IPR_RC_JOB_CONTINUE;
8722 }
8723
8724 /**
8725  * ipr_reset_start_bist - Run BIST on the adapter.
8726  * @ipr_cmd:    ipr command struct
8727  *
8728  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8729  *
8730  * Return value:
8731  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8732  **/
8733 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8734 {
8735         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8736         int rc = PCIBIOS_SUCCESSFUL;
8737
8738         ENTER;
8739         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8740                 writel(IPR_UPROCI_SIS64_START_BIST,
8741                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8742         else
8743                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8744
8745         if (rc == PCIBIOS_SUCCESSFUL) {
8746                 ipr_cmd->job_step = ipr_reset_bist_done;
8747                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8748                 rc = IPR_RC_JOB_RETURN;
8749         } else {
8750                 if (ioa_cfg->cfg_locked)
8751                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8752                 ioa_cfg->cfg_locked = 0;
8753                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8754                 rc = IPR_RC_JOB_CONTINUE;
8755         }
8756
8757         LEAVE;
8758         return rc;
8759 }
8760
8761 /**
8762  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8763  * @ipr_cmd:    ipr command struct
8764  *
8765  * Description: This clears PCI reset to the adapter and delays two seconds.
8766  *
8767  * Return value:
8768  *      IPR_RC_JOB_RETURN
8769  **/
8770 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8771 {
8772         ENTER;
8773         ipr_cmd->job_step = ipr_reset_bist_done;
8774         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8775         LEAVE;
8776         return IPR_RC_JOB_RETURN;
8777 }
8778
8779 /**
8780  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8781  * @work:       work struct
8782  *
8783  * Description: This pulses a warm reset to the slot and then resumes the reset job.
8784  *
8785  **/
8786 static void ipr_reset_reset_work(struct work_struct *work)
8787 {
8788         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8789         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8790         struct pci_dev *pdev = ioa_cfg->pdev;
8791         unsigned long lock_flags = 0;
8792
8793         ENTER;
8794         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8795         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8796         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8797
8798         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8799         if (ioa_cfg->reset_cmd == ipr_cmd)
8800                 ipr_reset_ioa_job(ipr_cmd);
8801         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8802         LEAVE;
8803 }
8804
8805 /**
8806  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8807  * @ipr_cmd:    ipr command struct
8808  *
8809  * Description: This asserts PCI reset to the adapter.
8810  *
8811  * Return value:
8812  *      IPR_RC_JOB_RETURN
8813  **/
8814 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8815 {
8816         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8817
8818         ENTER;
8819         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8820         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8821         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8822         LEAVE;
8823         return IPR_RC_JOB_RETURN;
8824 }
8825
8826 /**
8827  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8828  * @ipr_cmd:    ipr command struct
8829  *
8830  * Description: This attempts to block config access to the IOA.
8831  *
8832  * Return value:
8833  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8834  **/
8835 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8836 {
8837         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8838         int rc = IPR_RC_JOB_CONTINUE;
8839
8840         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8841                 ioa_cfg->cfg_locked = 1;
8842                 ipr_cmd->job_step = ioa_cfg->reset;
8843         } else {
8844                 if (ipr_cmd->u.time_left) {
8845                         rc = IPR_RC_JOB_RETURN;
8846                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8847                         ipr_reset_start_timer(ipr_cmd,
8848                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8849                 } else {
8850                         ipr_cmd->job_step = ioa_cfg->reset;
8851                         dev_err(&ioa_cfg->pdev->dev,
8852                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8853                 }
8854         }
8855
8856         return rc;
8857 }
8858
8859 /**
8860  * ipr_reset_block_config_access - Block config access to the IOA
8861  * @ipr_cmd:    ipr command struct
8862  *
8863  * Description: This attempts to block config access to the IOA
8864  *
8865  * Return value:
8866  *      IPR_RC_JOB_CONTINUE
8867  **/
8868 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8869 {
8870         ipr_cmd->ioa_cfg->cfg_locked = 0;
8871         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8872         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8873         return IPR_RC_JOB_CONTINUE;
8874 }
8875
8876 /**
8877  * ipr_reset_allowed - Query whether or not IOA can be reset
8878  * @ioa_cfg:    ioa config struct
8879  *
8880  * Return value:
8881  *      0 if reset not allowed / non-zero if reset is allowed
8882  **/
8883 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8884 {
8885         volatile u32 temp_reg;
8886
8887         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8888         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8889 }
8890
8891 /**
8892  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8893  * @ipr_cmd:    ipr command struct
8894  *
8895  * Description: This function waits for adapter permission to run BIST,
8896  * then runs BIST. If the adapter does not give permission after a
8897  * reasonable time, we will reset the adapter anyway. Resetting the
8898  * adapter without warning it risks losing its persistent error log;
8899  * if the adapter is reset while it is writing to its flash, that
8900  * flash segment will have bad ECC and be zeroed.
8902  *
8903  * Return value:
8904  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8905  **/
8906 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8907 {
8908         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8909         int rc = IPR_RC_JOB_RETURN;
8910
8911         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8912                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8913                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8914         } else {
8915                 ipr_cmd->job_step = ipr_reset_block_config_access;
8916                 rc = IPR_RC_JOB_CONTINUE;
8917         }
8918
8919         return rc;
8920 }
8921
8922 /**
8923  * ipr_reset_alert - Alert the adapter of a pending reset
8924  * @ipr_cmd:    ipr command struct
8925  *
8926  * Description: This function alerts the adapter that it will be reset.
8927  * If memory space is not currently enabled, proceed directly
8928  * to running BIST on the adapter. The timer must always be started
8929  * so we guarantee we do not run BIST from ipr_isr.
8930  *
8931  * Return value:
8932  *      IPR_RC_JOB_RETURN
8933  **/
8934 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8935 {
8936         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8937         u16 cmd_reg;
8938         int rc;
8939
8940         ENTER;
8941         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8942
8943         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8944                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8945                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8946                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8947         } else {
8948                 ipr_cmd->job_step = ipr_reset_block_config_access;
8949         }
8950
8951         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8952         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8953
8954         LEAVE;
8955         return IPR_RC_JOB_RETURN;
8956 }
8957
8958 /**
8959  * ipr_reset_quiesce_done - Complete IOA disconnect
8960  * @ipr_cmd:    ipr command struct
8961  *
8962  * Description: Freeze the adapter to complete quiesce processing
8963  *
8964  * Return value:
8965  *      IPR_RC_JOB_CONTINUE
8966  **/
8967 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8968 {
8969         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8970
8971         ENTER;
8972         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8973         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8974         LEAVE;
8975         return IPR_RC_JOB_CONTINUE;
8976 }
8977
8978 /**
8979  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8980  * @ipr_cmd:    ipr command struct
8981  *
8982  * Description: If nothing is outstanding to the IOA, proceed with
8983  *              the IOA disconnect; otherwise, reset the IOA.
8984  *
8985  * Return value:
8986  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8987  **/
8988 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8989 {
8990         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8991         struct ipr_cmnd *loop_cmd;
8992         struct ipr_hrr_queue *hrrq;
8993         int rc = IPR_RC_JOB_CONTINUE;
8994         int count = 0;
8995
8996         ENTER;
8997         ipr_cmd->job_step = ipr_reset_quiesce_done;
8998
8999         for_each_hrrq(hrrq, ioa_cfg) {
9000                 spin_lock(&hrrq->_lock);
9001                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9002                         count++;
9003                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9004                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9005                         rc = IPR_RC_JOB_RETURN;
9006                         break;
9007                 }
9008                 spin_unlock(&hrrq->_lock);
9009
9010                 if (count)
9011                         break;
9012         }
9013
9014         LEAVE;
9015         return rc;
9016 }
9017
9018 /**
9019  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9020  * @ipr_cmd:    ipr command struct
9021  *
9022  * Description: Cancel any outstanding HCAMs to the IOA.
9023  *
9024  * Return value:
9025  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9026  **/
9027 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9028 {
9029         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9030         int rc = IPR_RC_JOB_CONTINUE;
9031         struct ipr_cmd_pkt *cmd_pkt;
9032         struct ipr_cmnd *hcam_cmd;
9033         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9034
9035         ENTER;
9036         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9037
9038         if (!hrrq->ioa_is_dead) {
9039                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9040                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9041                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9042                                         continue;
9043
9044                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9046                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9047                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9048                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9049                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9050                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9051                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9052                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9053                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9054                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9055                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9056                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9057                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9058
9059                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9060                                            IPR_CANCEL_TIMEOUT);
9061
9062                                 rc = IPR_RC_JOB_RETURN;
9063                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9064                                 break;
9065                         }
9066                 }
9067         } else
9068                 ipr_cmd->job_step = ipr_reset_alert;
9069
9070         LEAVE;
9071         return rc;
9072 }
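
/*
 * Editor's sketch: the Cancel Request CDB built above scatters the 64-bit
 * IOARCB DMA address across two big-endian fields: CDB bytes 10-13 carry
 * bits 63:32 and bytes 2-5 carry bits 31:0. Standalone version of that
 * packing (layout taken from the code above; opcode bytes omitted):
 */
#include <stdint.h>
#include <stdio.h>

static void pack_cancel_addr(uint8_t cdb[16], uint64_t dma_addr)
{
        cdb[10] = (dma_addr >> 56) & 0xff;      /* bits 63:56 */
        cdb[11] = (dma_addr >> 48) & 0xff;
        cdb[12] = (dma_addr >> 40) & 0xff;
        cdb[13] = (dma_addr >> 32) & 0xff;      /* bits 39:32 */
        cdb[2]  = (dma_addr >> 24) & 0xff;      /* bits 31:24 */
        cdb[3]  = (dma_addr >> 16) & 0xff;
        cdb[4]  = (dma_addr >> 8) & 0xff;
        cdb[5]  = dma_addr & 0xff;              /* bits 7:0 */
}

int main(void)
{
        uint8_t cdb[16] = { 0 };

        pack_cancel_addr(cdb, 0x1122334455667788ULL);
        printf("cdb[10..13] = %02x %02x %02x %02x, cdb[2..5] = %02x %02x %02x %02x\n",
               cdb[10], cdb[11], cdb[12], cdb[13],
               cdb[2], cdb[3], cdb[4], cdb[5]);
        return 0;
}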
9073
9074 /**
9075  * ipr_reset_ucode_download_done - Microcode download completion
9076  * @ipr_cmd:    ipr command struct
9077  *
9078  * Description: This function unmaps the microcode download buffer.
9079  *
9080  * Return value:
9081  *      IPR_RC_JOB_CONTINUE
9082  **/
9083 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9084 {
9085         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9086         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9087
9088         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9089                      sglist->num_sg, DMA_TO_DEVICE);
9090
9091         ipr_cmd->job_step = ipr_reset_alert;
9092         return IPR_RC_JOB_CONTINUE;
9093 }
9094
9095 /**
9096  * ipr_reset_ucode_download - Download microcode to the adapter
9097  * @ipr_cmd:    ipr command struct
9098  *
9099  * Description: This function checks to see if there is microcode
9100  * to download to the adapter. If there is, a download is performed.
9101  *
9102  * Return value:
9103  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9104  **/
9105 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9106 {
9107         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9108         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9109
9110         ENTER;
9111         ipr_cmd->job_step = ipr_reset_alert;
9112
9113         if (!sglist)
9114                 return IPR_RC_JOB_CONTINUE;
9115
9116         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9117         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9118         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9119         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9120         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9121         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9122         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9123
9124         if (ioa_cfg->sis64)
9125                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9126         else
9127                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9128         ipr_cmd->job_step = ipr_reset_ucode_download_done;
9129
9130         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9131                    IPR_WRITE_BUFFER_TIMEOUT);
9132
9133         LEAVE;
9134         return IPR_RC_JOB_RETURN;
9135 }
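
/*
 * Editor's sketch: the WRITE BUFFER CDB above carries the microcode image
 * length as a 24-bit big-endian field in bytes 6-8. Standalone version of
 * that packing:
 */
#include <stdint.h>
#include <stdio.h>

static void pack_write_buffer_len(uint8_t cdb[10], uint32_t len)
{
        cdb[6] = (len & 0xff0000) >> 16;        /* most significant byte */
        cdb[7] = (len & 0x00ff00) >> 8;
        cdb[8] = len & 0x0000ff;                /* least significant byte */
}

int main(void)
{
        uint8_t cdb[10] = { 0 };

        pack_write_buffer_len(cdb, 0x040000);   /* e.g. a 256 KiB image */
        printf("cdb[6..8] = %02x %02x %02x\n", cdb[6], cdb[7], cdb[8]);
        return 0;
}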
9136
9137 /**
9138  * ipr_reset_shutdown_ioa - Shutdown the adapter
9139  * @ipr_cmd:    ipr command struct
9140  *
9141  * Description: This function issues an adapter shutdown of the
9142  * specified type to the specified adapter as part of the
9143  * adapter reset job.
9144  *
9145  * Return value:
9146  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9147  **/
9148 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9149 {
9150         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9151         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9152         unsigned long timeout;
9153         int rc = IPR_RC_JOB_CONTINUE;
9154
9155         ENTER;
9156         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9157                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9158         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
9159                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9160                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9161                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9162                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9163                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9164
9165                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9166                         timeout = IPR_SHUTDOWN_TIMEOUT;
9167                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9168                         timeout = IPR_INTERNAL_TIMEOUT;
9169                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9170                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
9171                 else
9172                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9173
9174                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9175
9176                 rc = IPR_RC_JOB_RETURN;
9177                 ipr_cmd->job_step = ipr_reset_ucode_download;
9178         } else
9179                 ipr_cmd->job_step = ipr_reset_alert;
9180
9181         LEAVE;
9182         return rc;
9183 }
9184
9185 /**
9186  * ipr_reset_ioa_job - Adapter reset job
9187  * @ipr_cmd:    ipr command struct
9188  *
9189  * Description: This function is the job router for the adapter reset job.
9190  *
9191  * Return value:
9192  *      none
9193  **/
9194 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9195 {
9196         u32 rc, ioasc;
9197         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9198
9199         do {
9200                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9201
9202                 if (ioa_cfg->reset_cmd != ipr_cmd) {
9203                         /*
9204                          * We are doing nested adapter resets and this is
9205                          * not the current reset job.
9206                          */
9207                         list_add_tail(&ipr_cmd->queue,
9208                                         &ipr_cmd->hrrq->hrrq_free_q);
9209                         return;
9210                 }
9211
9212                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
9213                         rc = ipr_cmd->job_step_failed(ipr_cmd);
9214                         if (rc == IPR_RC_JOB_RETURN)
9215                                 return;
9216                 }
9217
9218                 ipr_reinit_ipr_cmnd(ipr_cmd);
9219                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
9220                 rc = ipr_cmd->job_step(ipr_cmd);
9221         } while (rc == IPR_RC_JOB_CONTINUE);
9222 }
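
/*
 * Editor's sketch: the reset job above is a function-pointer state machine.
 * A step returns IPR_RC_JOB_CONTINUE when the next job_step can run
 * immediately (the router loops), or IPR_RC_JOB_RETURN when it has armed a
 * timer or issued a command and the completion path will re-enter the
 * router later. Minimal standalone model of that contract (names
 * hypothetical):
 */
#include <stdio.h>

enum job_rc { RC_JOB_CONTINUE, RC_JOB_RETURN };

struct job {
        enum job_rc (*job_step)(struct job *);
};

static enum job_rc step_two(struct job *j)
{
        (void)j;
        puts("step two: waiting on hardware");  /* would arm a timer here */
        return RC_JOB_RETURN;                   /* router exits until completion */
}

static enum job_rc step_one(struct job *j)
{
        puts("step one: done, chaining");
        j->job_step = step_two;                 /* select the next step */
        return RC_JOB_CONTINUE;                 /* router loops immediately */
}

static void run_job(struct job *j)
{
        while (j->job_step(j) == RC_JOB_CONTINUE)
                ;
}

int main(void)
{
        struct job j = { .job_step = step_one };

        run_job(&j);
        return 0;
}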
9223
9224 /**
9225  * _ipr_initiate_ioa_reset - Initiate an adapter reset
9226  * @ioa_cfg:            ioa config struct
9227  * @job_step:           first job step of reset job
9228  * @shutdown_type:      shutdown type
9229  *
9230  * Description: This function will initiate the reset of the given adapter
9231  * starting at the selected job step.
9232  * If the caller needs to wait on the completion of the reset,
9233  * the caller must sleep on the reset_wait_q.
9234  *
9235  * Return value:
9236  *      none
9237  **/
9238 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9239                                     int (*job_step) (struct ipr_cmnd *),
9240                                     enum ipr_shutdown_type shutdown_type)
9241 {
9242         struct ipr_cmnd *ipr_cmd;
9243         int i;
9244
9245         ioa_cfg->in_reset_reload = 1;
9246         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9247                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9248                 ioa_cfg->hrrq[i].allow_cmds = 0;
9249                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9250         }
9251         wmb();
9252         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9253                 scsi_block_requests(ioa_cfg->host);
9254
9255         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9256         ioa_cfg->reset_cmd = ipr_cmd;
9257         ipr_cmd->job_step = job_step;
9258         ipr_cmd->u.shutdown_type = shutdown_type;
9259
9260         ipr_reset_ioa_job(ipr_cmd);
9261 }
9262
9263 /**
9264  * ipr_initiate_ioa_reset - Initiate an adapter reset
9265  * @ioa_cfg:            ioa config struct
9266  * @shutdown_type:      shutdown type
9267  *
9268  * Description: This function will initiate the reset of the given adapter.
9269  * If the caller needs to wait on the completion of the reset,
9270  * the caller must sleep on the reset_wait_q.
9271  *
9272  * Return value:
9273  *      none
9274  **/
9275 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9276                                    enum ipr_shutdown_type shutdown_type)
9277 {
9278         int i;
9279
9280         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9281                 return;
9282
9283         if (ioa_cfg->in_reset_reload) {
9284                 if (ioa_cfg->sdt_state == GET_DUMP)
9285                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9286                 else if (ioa_cfg->sdt_state == READ_DUMP)
9287                         ioa_cfg->sdt_state = ABORT_DUMP;
9288         }
9289
9290         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9291                 dev_err(&ioa_cfg->pdev->dev,
9292                         "IOA taken offline - error recovery failed\n");
9293
9294                 ioa_cfg->reset_retries = 0;
9295                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9296                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9297                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
9298                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9299                 }
9300                 wmb();
9301
9302                 if (ioa_cfg->in_ioa_bringdown) {
9303                         ioa_cfg->reset_cmd = NULL;
9304                         ioa_cfg->in_reset_reload = 0;
9305                         ipr_fail_all_ops(ioa_cfg);
9306                         wake_up_all(&ioa_cfg->reset_wait_q);
9307
9308                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9309                                 spin_unlock_irq(ioa_cfg->host->host_lock);
9310                                 scsi_unblock_requests(ioa_cfg->host);
9311                                 spin_lock_irq(ioa_cfg->host->host_lock);
9312                         }
9313                         return;
9314                 } else {
9315                         ioa_cfg->in_ioa_bringdown = 1;
9316                         shutdown_type = IPR_SHUTDOWN_NONE;
9317                 }
9318         }
9319
9320         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9321                                 shutdown_type);
9322 }
9323
9324 /**
9325  * ipr_reset_freeze - Hold off all I/O activity
9326  * @ipr_cmd:    ipr command struct
9327  *
9328  * Description: If the PCI slot is frozen, hold off all I/O
9329  * activity; then, as soon as the slot is available again,
9330  * initiate an adapter reset.
9331  */
9332 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9333 {
9334         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9335         int i;
9336
9337         /* Disallow new interrupts, avoid loop */
9338         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9339                 spin_lock(&ioa_cfg->hrrq[i]._lock);
9340                 ioa_cfg->hrrq[i].allow_interrupts = 0;
9341                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9342         }
9343         wmb();
9344         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9345         ipr_cmd->done = ipr_reset_ioa_job;
9346         return IPR_RC_JOB_RETURN;
9347 }
9348
9349 /**
9350  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9351  * @pdev:       PCI device struct
9352  *
9353  * Description: This routine is called to tell us that the MMIO
9354  * access to the IOA has been restored.
9355  */
9356 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9357 {
9358         unsigned long flags = 0;
9359         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9360
9361         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9362         if (!ioa_cfg->probe_done)
9363                 pci_save_state(pdev);
9364         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9365         return PCI_ERS_RESULT_NEED_RESET;
9366 }
9367
9368 /**
9369  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9370  * @pdev:       PCI device struct
9371  *
9372  * Description: This routine is called to tell us that the PCI bus
9373  * is down. Can't do anything here, except put the device driver
9374  * into a holding pattern, waiting for the PCI bus to come back.
9375  */
9376 static void ipr_pci_frozen(struct pci_dev *pdev)
9377 {
9378         unsigned long flags = 0;
9379         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9380
9381         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9382         if (ioa_cfg->probe_done)
9383                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9384         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9385 }
9386
9387 /**
9388  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9389  * @pdev:       PCI device struct
9390  *
9391  * Description: This routine is called by the pci error recovery
9392  * code after the PCI slot has been reset, just before we
9393  * should resume normal operations.
9394  */
9395 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9396 {
9397         unsigned long flags = 0;
9398         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9399
9400         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9401         if (ioa_cfg->probe_done) {
9402                 if (ioa_cfg->needs_warm_reset)
9403                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9404                 else
9405                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9406                                                 IPR_SHUTDOWN_NONE);
9407         } else
9408                 wake_up_all(&ioa_cfg->eeh_wait_q);
9409         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9410         return PCI_ERS_RESULT_RECOVERED;
9411 }
9412
9413 /**
9414  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9415  * @pdev:       PCI device struct
9416  *
9417  * Description: This routine is called when the PCI bus has
9418  * permanently failed.
9419  */
9420 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9421 {
9422         unsigned long flags = 0;
9423         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9424         int i;
9425
9426         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9427         if (ioa_cfg->probe_done) {
9428                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9429                         ioa_cfg->sdt_state = ABORT_DUMP;
9430                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9431                 ioa_cfg->in_ioa_bringdown = 1;
9432                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9433                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9434                         ioa_cfg->hrrq[i].allow_cmds = 0;
9435                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9436                 }
9437                 wmb();
9438                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9439         } else
9440                 wake_up_all(&ioa_cfg->eeh_wait_q);
9441         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9442 }
9443
9444 /**
9445  * ipr_pci_error_detected - Called when a PCI error is detected.
9446  * @pdev:       PCI device struct
9447  * @state:      PCI channel state
9448  *
9449  * Description: Called when a PCI error is detected.
9450  *
9451  * Return value:
9452  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9453  */
9454 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9455                                                pci_channel_state_t state)
9456 {
9457         switch (state) {
9458         case pci_channel_io_frozen:
9459                 ipr_pci_frozen(pdev);
9460                 return PCI_ERS_RESULT_CAN_RECOVER;
9461         case pci_channel_io_perm_failure:
9462                 ipr_pci_perm_failure(pdev);
9463                 return PCI_ERS_RESULT_DISCONNECT;
9465         default:
9466                 break;
9467         }
9468         return PCI_ERS_RESULT_NEED_RESET;
9469 }
9470
9471 /**
9472  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9473  * @ioa_cfg:    ioa cfg struct
9474  *
9475  * Description: This is the second phase of adapter initialization.
9476  * This function takes care of initializing the adapter to the point
9477  * where it can accept new commands.
9478  *
9479  * Return value:
9480  *      0 on success / -EIO on failure
9481  **/
9482 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9483 {
9484         int rc = 0;
9485         unsigned long host_lock_flags = 0;
9486
9487         ENTER;
9488         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9489         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9490         ioa_cfg->probe_done = 1;
9491         if (ioa_cfg->needs_hard_reset) {
9492                 ioa_cfg->needs_hard_reset = 0;
9493                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9494         } else
9495                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9496                                         IPR_SHUTDOWN_NONE);
9497         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9498
9499         LEAVE;
9500         return rc;
9501 }
9502
9503 /**
9504  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9505  * @ioa_cfg:    ioa config struct
9506  *
9507  * Return value:
9508  *      none
9509  **/
9510 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9511 {
9512         int i;
9513
9514         if (ioa_cfg->ipr_cmnd_list) {
9515                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9516                         if (ioa_cfg->ipr_cmnd_list[i])
9517                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9518                                               ioa_cfg->ipr_cmnd_list[i],
9519                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9520
9521                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9522                 }
9523         }
9524
9525         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9527
9528         kfree(ioa_cfg->ipr_cmnd_list);
9529         kfree(ioa_cfg->ipr_cmnd_list_dma);
9530         ioa_cfg->ipr_cmnd_list = NULL;
9531         ioa_cfg->ipr_cmnd_list_dma = NULL;
9532         ioa_cfg->ipr_cmd_pool = NULL;
9533 }
9534
9535 /**
9536  * ipr_free_mem - Frees memory allocated for an adapter
9537  * @ioa_cfg:    ioa cfg struct
9538  *
9539  * Return value:
9540  *      nothing
9541  **/
9542 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9543 {
9544         int i;
9545
9546         kfree(ioa_cfg->res_entries);
9547         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9548                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9549         ipr_free_cmd_blks(ioa_cfg);
9550
9551         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9552                 dma_free_coherent(&ioa_cfg->pdev->dev,
9553                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9554                                   ioa_cfg->hrrq[i].host_rrq,
9555                                   ioa_cfg->hrrq[i].host_rrq_dma);
9556
9557         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9558                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9559
9560         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9561                 dma_free_coherent(&ioa_cfg->pdev->dev,
9562                                   sizeof(struct ipr_hostrcb),
9563                                   ioa_cfg->hostrcb[i],
9564                                   ioa_cfg->hostrcb_dma[i]);
9565         }
9566
9567         ipr_free_dump(ioa_cfg);
9568         kfree(ioa_cfg->trace);
9569 }
9570
9571 /**
9572  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9573  * @ioa_cfg:    ipr cfg struct
9574  *
9575  * This function frees all allocated IRQs for the
9576  * specified adapter.
9577  *
9578  * Return value:
9579  *      none
9580  **/
9581 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9582 {
9583         struct pci_dev *pdev = ioa_cfg->pdev;
9584         int i;
9585
9586         for (i = 0; i < ioa_cfg->nvectors; i++)
9587                 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9588         pci_free_irq_vectors(pdev);
9589 }
9590
9591 /**
9592  * ipr_free_all_resources - Free all allocated resources for an adapter.
9593  * @ioa_cfg:    ioa config struct
9594  *
9595  * This function frees all allocated resources for the
9596  * specified adapter.
9597  *
9598  * Return value:
9599  *      none
9600  **/
9601 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9602 {
9603         struct pci_dev *pdev = ioa_cfg->pdev;
9604
9605         ENTER;
9606         ipr_free_irqs(ioa_cfg);
9607         if (ioa_cfg->reset_work_q)
9608                 destroy_workqueue(ioa_cfg->reset_work_q);
9609         iounmap(ioa_cfg->hdw_dma_regs);
9610         pci_release_regions(pdev);
9611         ipr_free_mem(ioa_cfg);
9612         scsi_host_put(ioa_cfg->host);
9613         pci_disable_device(pdev);
9614         LEAVE;
9615 }
9616
9617 /**
9618  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9619  * @ioa_cfg:    ioa config struct
9620  *
9621  * Return value:
9622  *      0 on success / -ENOMEM on allocation failure
9623  **/
9624 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9625 {
9626         struct ipr_cmnd *ipr_cmd;
9627         struct ipr_ioarcb *ioarcb;
9628         dma_addr_t dma_addr;
9629         int i, entries_each_hrrq, hrrq_id = 0;
9630
9631         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9632                                                 sizeof(struct ipr_cmnd), 512, 0);
9633
9634         if (!ioa_cfg->ipr_cmd_pool)
9635                 return -ENOMEM;
9636
9637         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9638         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9639
9640         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9641                 ipr_free_cmd_blks(ioa_cfg);
9642                 return -ENOMEM;
9643         }
9644
9645         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9646                 if (ioa_cfg->hrrq_num > 1) {
9647                         if (i == 0) {
9648                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9649                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9650                                 ioa_cfg->hrrq[i].max_cmd_id =
9651                                         (entries_each_hrrq - 1);
9652                         } else {
9653                                 entries_each_hrrq =
9654                                         IPR_NUM_BASE_CMD_BLKS/
9655                                         (ioa_cfg->hrrq_num - 1);
9656                                 ioa_cfg->hrrq[i].min_cmd_id =
9657                                         IPR_NUM_INTERNAL_CMD_BLKS +
9658                                         (i - 1) * entries_each_hrrq;
9659                                 ioa_cfg->hrrq[i].max_cmd_id =
9660                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9661                                         i * entries_each_hrrq - 1);
9662                         }
9663                 } else {
9664                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9665                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9666                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9667                 }
9668                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9669         }
9670
9671         BUG_ON(ioa_cfg->hrrq_num == 0);
9672
9673         i = IPR_NUM_CMD_BLKS -
9674                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9675         if (i > 0) {
9676                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9677                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9678         }
9679
9680         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9681                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9682
9683                 if (!ipr_cmd) {
9684                         ipr_free_cmd_blks(ioa_cfg);
9685                         return -ENOMEM;
9686                 }
9687
9688                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9689                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9690                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9691
9692                 ioarcb = &ipr_cmd->ioarcb;
9693                 ipr_cmd->dma_addr = dma_addr;
9694                 if (ioa_cfg->sis64)
9695                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9696                 else
9697                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9698
9699                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9700                 if (ioa_cfg->sis64) {
9701                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9702                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9703                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9704                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9705                 } else {
9706                         ioarcb->write_ioadl_addr =
9707                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9708                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9709                         ioarcb->ioasa_host_pci_addr =
9710                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9711                 }
9712                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9713                 ipr_cmd->cmd_index = i;
9714                 ipr_cmd->ioa_cfg = ioa_cfg;
9715                 ipr_cmd->sense_buffer_dma = dma_addr +
9716                         offsetof(struct ipr_cmnd, sense_buffer);
9717
9718                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9719                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9720                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9721                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9722                         hrrq_id++;
9723         }
9724
9725         return 0;
9726 }
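
/*
 * Editor's sketch: with more than one HRRQ, queue 0 is reserved for a fixed
 * number of internal command blocks and the rest are split evenly across
 * the remaining queues, with any division remainder folded into the last
 * queue, as computed above. Standalone check of that arithmetic
 * (block counts hypothetical):
 */
#include <stdio.h>

#define DEMO_INTERNAL_BLKS 5
#define DEMO_BASE_BLKS     100
#define DEMO_TOTAL_BLKS    (DEMO_INTERNAL_BLKS + DEMO_BASE_BLKS)

int main(void)
{
        int hrrq_num = 4, i, min_id, max_id, per_q;

        for (i = 0; i < hrrq_num; i++) {
                if (i == 0) {
                        min_id = 0;
                        max_id = DEMO_INTERNAL_BLKS - 1;
                } else {
                        per_q = DEMO_BASE_BLKS / (hrrq_num - 1);
                        min_id = DEMO_INTERNAL_BLKS + (i - 1) * per_q;
                        max_id = DEMO_INTERNAL_BLKS + i * per_q - 1;
                }
                if (i == hrrq_num - 1)  /* remainder goes to the last queue */
                        max_id = DEMO_TOTAL_BLKS - 1;
                printf("hrrq %d: cmd ids %d..%d\n", i, min_id, max_id);
        }
        return 0;
}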
9727
9728 /**
9729  * ipr_alloc_mem - Allocate memory for an adapter
9730  * @ioa_cfg:    ioa config struct
9731  *
9732  * Return value:
9733  *      0 on success / non-zero for error
9734  **/
9735 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9736 {
9737         struct pci_dev *pdev = ioa_cfg->pdev;
9738         int i, rc = -ENOMEM;
9739
9740         ENTER;
9741         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9742                                        sizeof(struct ipr_resource_entry), GFP_KERNEL);
9743
9744         if (!ioa_cfg->res_entries)
9745                 goto out;
9746
9747         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9748                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9749                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9750         }
9751
9752         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9753                                               sizeof(struct ipr_misc_cbs),
9754                                               &ioa_cfg->vpd_cbs_dma,
9755                                               GFP_KERNEL);
9756
9757         if (!ioa_cfg->vpd_cbs)
9758                 goto out_free_res_entries;
9759
9760         if (ipr_alloc_cmd_blks(ioa_cfg))
9761                 goto out_free_vpd_cbs;
9762
9763         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9764                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9765                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9766                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9767                                         GFP_KERNEL);
9768
9769                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9770                                 while (--i >= 0)
9771                                 dma_free_coherent(&pdev->dev,
9772                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9773                                         ioa_cfg->hrrq[i].host_rrq,
9774                                         ioa_cfg->hrrq[i].host_rrq_dma);
9775                         goto out_ipr_free_cmd_blocks;
9776                 }
9777                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9778         }
9779
9780         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9781                                                   ioa_cfg->cfg_table_size,
9782                                                   &ioa_cfg->cfg_table_dma,
9783                                                   GFP_KERNEL);
9784
9785         if (!ioa_cfg->u.cfg_table)
9786                 goto out_free_host_rrq;
9787
9788         for (i = 0; i < IPR_MAX_HCAMS; i++) {
9789                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9790                                                          sizeof(struct ipr_hostrcb),
9791                                                          &ioa_cfg->hostrcb_dma[i],
9792                                                          GFP_KERNEL);
9793
9794                 if (!ioa_cfg->hostrcb[i])
9795                         goto out_free_hostrcb_dma;
9796
9797                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9798                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9799                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9800                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9801         }
9802
9803         ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9804                                  sizeof(struct ipr_trace_entry), GFP_KERNEL);
9805
9806         if (!ioa_cfg->trace)
9807                 goto out_free_hostrcb_dma;
9808
9809         rc = 0;
9810 out:
9811         LEAVE;
9812         return rc;
9813
9814 out_free_hostrcb_dma:
9815         while (i-- > 0) {
9816                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9817                                   ioa_cfg->hostrcb[i],
9818                                   ioa_cfg->hostrcb_dma[i]);
9819         }
9820         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9821                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9822 out_free_host_rrq:
9823         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9824                 dma_free_coherent(&pdev->dev,
9825                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9826                                   ioa_cfg->hrrq[i].host_rrq,
9827                                   ioa_cfg->hrrq[i].host_rrq_dma);
9828         }
9829 out_ipr_free_cmd_blocks:
9830         ipr_free_cmd_blks(ioa_cfg);
9831 out_free_vpd_cbs:
9832         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9833                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9834 out_free_res_entries:
9835         kfree(ioa_cfg->res_entries);
9836         goto out;
9837 }
9838
9839 /**
9840  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9841  * @ioa_cfg:    ioa config struct
9842  *
9843  * Return value:
9844  *      none
9845  **/
9846 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9847 {
9848         int i;
9849
9850         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9851                 ioa_cfg->bus_attr[i].bus = i;
9852                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9853                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9854                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9855                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9856                 else
9857                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9858         }
9859 }
9860
9861 /**
9862  * ipr_init_regs - Initialize IOA registers
9863  * @ioa_cfg:    ioa config struct
9864  *
9865  * Return value:
9866  *      none
9867  **/
9868 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9869 {
9870         const struct ipr_interrupt_offsets *p;
9871         struct ipr_interrupts *t;
9872         void __iomem *base;
9873
9874         p = &ioa_cfg->chip_cfg->regs;
9875         t = &ioa_cfg->regs;
9876         base = ioa_cfg->hdw_dma_regs;
9877
9878         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9879         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9880         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9881         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9882         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9883         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9884         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9885         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9886         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9887         t->ioarrin_reg = base + p->ioarrin_reg;
9888         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9889         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9890         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9891         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9892         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9893         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9894
9895         if (ioa_cfg->sis64) {
9896                 t->init_feedback_reg = base + p->init_feedback_reg;
9897                 t->dump_addr_reg = base + p->dump_addr_reg;
9898                 t->dump_data_reg = base + p->dump_data_reg;
9899                 t->endian_swap_reg = base + p->endian_swap_reg;
9900         }
9901 }
9902
9903 /**
9904  * ipr_init_ioa_cfg - Initialize IOA config struct
9905  * @ioa_cfg:    ioa config struct
9906  * @host:               scsi host struct
9907  * @pdev:               PCI dev struct
9908  *
9909  * Return value:
9910  *      none
9911  **/
9912 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9913                              struct Scsi_Host *host, struct pci_dev *pdev)
9914 {
9915         int i;
9916
9917         ioa_cfg->host = host;
9918         ioa_cfg->pdev = pdev;
9919         ioa_cfg->log_level = ipr_log_level;
9920         ioa_cfg->doorbell = IPR_DOORBELL;
9921         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9922         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9923         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9924         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9925         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9926         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9927
9928         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9929         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9930         INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9931         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9932         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9933         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9934         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9935         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9936         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9937         ioa_cfg->sdt_state = INACTIVE;
9938
9939         ipr_initialize_bus_attr(ioa_cfg);
9940         ioa_cfg->max_devs_supported = ipr_max_devs;
9941
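        /*
         * Clamp the requested device count to the per-SIS-level maximum
         * and size the config table accordingly.
         */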
9942         if (ioa_cfg->sis64) {
9943                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9944                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9945                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9946                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9947                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9948                                            + ((sizeof(struct ipr_config_table_entry64)
9949                                                * ioa_cfg->max_devs_supported)));
9950         } else {
9951                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9952                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9953                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9954                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9955                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9956                                            + ((sizeof(struct ipr_config_table_entry)
9957                                                * ioa_cfg->max_devs_supported)));
9958         }
9959
9960         host->max_channel = IPR_VSET_BUS;
9961         host->unique_id = host->host_no;
9962         host->max_cmd_len = IPR_MAX_CDB_LEN;
9963         host->can_queue = ioa_cfg->max_cmds;
9964         pci_set_drvdata(pdev, ioa_cfg);
9965
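        /*
         * HRRQ 0 shares the SCSI host lock; any additional HRRQs are
         * protected by their own per-queue locks.
         */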
9966         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9967                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9968                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9969                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9970                 if (i == 0)
9971                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9972                 else
9973                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9974         }
9975 }
9976
9977 /**
9978  * ipr_get_chip_info - Find adapter chip information
9979  * @dev_id:             PCI device id struct
9980  *
9981  * Return value:
9982  *      ptr to chip information on success / NULL on failure
9983  **/
9984 static const struct ipr_chip_t *
9985 ipr_get_chip_info(const struct pci_device_id *dev_id)
9986 {
9987         int i;
9988
9989         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9990                 if (ipr_chip[i].vendor == dev_id->vendor &&
9991                     ipr_chip[i].device == dev_id->device)
9992                         return &ipr_chip[i];
9993         return NULL;
9994 }
9995
9996 /**
9997  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9998  *                                              during probe time
9999  * @ioa_cfg:    ioa config struct
10000  *
10001  * Return value:
10002  *      None
10003  **/
10004 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10005 {
10006         struct pci_dev *pdev = ioa_cfg->pdev;
10007
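        /*
         * If EEH error recovery is in progress, wait (bounded) for the
         * channel to come back online, then restore the PCI config space
         * the recovery reset may have clobbered.
         */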
10008         if (pci_channel_offline(pdev)) {
10009                 wait_event_timeout(ioa_cfg->eeh_wait_q,
10010                                    !pci_channel_offline(pdev),
10011                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10012                 pci_restore_state(pdev);
10013         }
10014 }
10015
10016 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10017 {
10018         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10019
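        /*
         * snprintf() NUL-terminates within n bytes, so the explicit
         * terminator written below is redundant but harmless.
         */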
10020         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10021                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10022                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10023                 ioa_cfg->vectors_info[vec_idx].
10024                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10025         }
10026 }
10027
10028 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10029                 struct pci_dev *pdev)
10030 {
10031         int i, rc;
10032
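        /*
         * Vector 0 is requested by the caller; request IRQs for the
         * remaining HRRQ vectors here and unwind them all on failure.
         */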
10033         for (i = 1; i < ioa_cfg->nvectors; i++) {
10034                 rc = request_irq(pci_irq_vector(pdev, i),
10035                         ipr_isr_mhrrq,
10036                         0,
10037                         ioa_cfg->vectors_info[i].desc,
10038                         &ioa_cfg->hrrq[i]);
10039                 if (rc) {
10040                         while (--i >= 0)
10041                                 free_irq(pci_irq_vector(pdev, i),
10042                                         &ioa_cfg->hrrq[i]);
10043                         return rc;
10044                 }
10045         }
10046         return 0;
10047 }
10048
/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:                interrupt number
 * @devp:               pointer to the ioa config struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 *      IRQ_HANDLED
 **/
10059 static irqreturn_t ipr_test_intr(int irq, void *devp)
10060 {
10061         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10062         unsigned long lock_flags = 0;
10063         irqreturn_t rc = IRQ_HANDLED;
10064
        dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
10066         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10067
10068         ioa_cfg->msi_received = 1;
10069         wake_up(&ioa_cfg->msi_wait_q);
10070
10071         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10072         return rc;
10073 }
10074
/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10086 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
10087 {
10088         int rc;
10089         volatile u32 int_reg;
10090         unsigned long lock_flags = 0;
10091         int irq = pci_irq_vector(pdev, 0);
10092
10093         ENTER;
10094
10095         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10096         init_waitqueue_head(&ioa_cfg->msi_wait_q);
10097         ioa_cfg->msi_received = 0;
10098         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10099         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
10100         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10101         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10102
10103         rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
10104         if (rc) {
                dev_err(&pdev->dev, "Cannot assign irq %d\n", irq);
10106                 return rc;
10107         } else if (ipr_debug)
10108                 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
10109
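        /* Generate a test interrupt and give ipr_test_intr() up to 1s to see it. */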
10110         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
10111         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10112         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
10113         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10114         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10115
10116         if (!ioa_cfg->msi_received) {
10117                 /* MSI test failed */
10118                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
10119                 rc = -EOPNOTSUPP;
10120         } else if (ipr_debug)
10121                 dev_info(&pdev->dev, "MSI test succeeded.\n");
10122
10123         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10124
10125         free_irq(irq, ioa_cfg);
10126
10127         LEAVE;
10128
10129         return rc;
10130 }
10131
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:               PCI device struct
 * @dev_id:             PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10139 static int ipr_probe_ioa(struct pci_dev *pdev,
10140                          const struct pci_device_id *dev_id)
10141 {
10142         struct ipr_ioa_cfg *ioa_cfg;
10143         struct Scsi_Host *host;
10144         unsigned long ipr_regs_pci;
10145         void __iomem *ipr_regs;
10146         int rc = PCIBIOS_SUCCESSFUL;
10147         volatile u32 mask, uproc, interrupts;
10148         unsigned long lock_flags, driver_lock_flags;
10149         unsigned int irq_flag;
10150
10151         ENTER;
10152
10153         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
10154         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10155
10156         if (!host) {
10157                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10158                 rc = -ENOMEM;
10159                 goto out;
10160         }
10161
10162         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10163         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
10164         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
10165
10166         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10167
        if (!ioa_cfg->ipr_chip) {
                dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
                        dev_id->vendor, dev_id->device);
                rc = -EINVAL;
                goto out_scsi_host_put;
        }
10173
10174         /* set SIS 32 or SIS 64 */
10175         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10176         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10177         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10178         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10179
10180         if (ipr_transop_timeout)
10181                 ioa_cfg->transop_timeout = ipr_transop_timeout;
10182         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10183                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10184         else
10185                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10186
10187         ioa_cfg->revid = pdev->revision;
10188
10189         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10190
10191         ipr_regs_pci = pci_resource_start(pdev, 0);
10192
10193         rc = pci_request_regions(pdev, IPR_NAME);
10194         if (rc < 0) {
10195                 dev_err(&pdev->dev,
10196                         "Couldn't register memory range of registers\n");
10197                 goto out_scsi_host_put;
10198         }
10199
10200         rc = pci_enable_device(pdev);
10201
10202         if (rc || pci_channel_offline(pdev)) {
10203                 if (pci_channel_offline(pdev)) {
10204                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10205                         rc = pci_enable_device(pdev);
10206                 }
10207
10208                 if (rc) {
10209                         dev_err(&pdev->dev, "Cannot enable adapter\n");
10210                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10211                         goto out_release_regions;
10212                 }
10213         }
10214
10215         ipr_regs = pci_ioremap_bar(pdev, 0);
10216
10217         if (!ipr_regs) {
10218                 dev_err(&pdev->dev,
10219                         "Couldn't map memory range of registers\n");
10220                 rc = -ENOMEM;
10221                 goto out_disable;
10222         }
10223
10224         ioa_cfg->hdw_dma_regs = ipr_regs;
10225         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10226         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10227
10228         ipr_init_regs(ioa_cfg);
10229
10230         if (ioa_cfg->sis64) {
10231                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10232                 if (rc < 0) {
10233                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10234                         rc = dma_set_mask_and_coherent(&pdev->dev,
10235                                                        DMA_BIT_MASK(32));
10236                 }
10237         } else
10238                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10239
10240         if (rc < 0) {
10241                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10242                 goto cleanup_nomem;
10243         }
10244
10245         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10246                                    ioa_cfg->chip_cfg->cache_line_size);
10247
10248         if (rc != PCIBIOS_SUCCESSFUL) {
10249                 dev_err(&pdev->dev, "Write of cache line size failed\n");
10250                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10251                 rc = -EIO;
10252                 goto cleanup_nomem;
10253         }
10254
10255         /* Issue MMIO read to ensure card is not in EEH */
10256         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10257         ipr_wait_for_pci_err_recovery(ioa_cfg);
10258
10259         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10260                 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10261                         IPR_MAX_MSIX_VECTORS);
10262                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10263         }
10264
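        /*
         * Prefer MSI-X/MSI when the chip supports it; otherwise fall
         * back to a single legacy INTx vector.
         */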
10265         irq_flag = PCI_IRQ_LEGACY;
10266         if (ioa_cfg->ipr_chip->has_msi)
10267                 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10268         rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10269         if (rc < 0) {
10270                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10271                 goto cleanup_nomem;
10272         }
10273         ioa_cfg->nvectors = rc;
10274
10275         if (!pdev->msi_enabled && !pdev->msix_enabled)
10276                 ioa_cfg->clear_isr = 1;
10277
10278         pci_set_master(pdev);
10279
10280         if (pci_channel_offline(pdev)) {
10281                 ipr_wait_for_pci_err_recovery(ioa_cfg);
10282                 pci_set_master(pdev);
10283                 if (pci_channel_offline(pdev)) {
10284                         rc = -EIO;
10285                         goto out_msi_disable;
10286                 }
10287         }
10288
10289         if (pdev->msi_enabled || pdev->msix_enabled) {
10290                 rc = ipr_test_msi(ioa_cfg, pdev);
10291                 switch (rc) {
10292                 case 0:
                        dev_info(&pdev->dev,
                                 "Request for %d MSI%ss succeeded.\n",
                                 ioa_cfg->nvectors,
                                 pdev->msix_enabled ? "-X" : "");
10296                         break;
10297                 case -EOPNOTSUPP:
10298                         ipr_wait_for_pci_err_recovery(ioa_cfg);
10299                         pci_free_irq_vectors(pdev);
10300
10301                         ioa_cfg->nvectors = 1;
10302                         ioa_cfg->clear_isr = 1;
10303                         break;
10304                 default:
10305                         goto out_msi_disable;
10306                 }
10307         }
10308
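        /*
         * Use one HRRQ per interrupt vector, capped by the number of
         * online CPUs and the driver-wide maximum.
         */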
10309         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10310                                 (unsigned int)num_online_cpus(),
10311                                 (unsigned int)IPR_MAX_HRRQ_NUM);
10312
        rc = ipr_save_pcix_cmd_reg(ioa_cfg);
        if (rc)
                goto out_msi_disable;

        rc = ipr_set_pcix_cmd_reg(ioa_cfg);
        if (rc)
                goto out_msi_disable;
10318
10319         rc = ipr_alloc_mem(ioa_cfg);
10320         if (rc < 0) {
10321                 dev_err(&pdev->dev,
10322                         "Couldn't allocate enough memory for device driver!\n");
10323                 goto out_msi_disable;
10324         }
10325
10326         /* Save away PCI config space for use following IOA reset */
10327         rc = pci_save_state(pdev);
10328
10329         if (rc != PCIBIOS_SUCCESSFUL) {
10330                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10331                 rc = -EIO;
10332                 goto cleanup_nolog;
10333         }
10334
10335         /*
10336          * If HRRQ updated interrupt is not masked, or reset alert is set,
10337          * the card is in an unknown state and needs a hard reset
10338          */
10339         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10340         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10341         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10342         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10343                 ioa_cfg->needs_hard_reset = 1;
10344         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10345                 ioa_cfg->needs_hard_reset = 1;
10346         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10347                 ioa_cfg->ioa_unit_checked = 1;
10348
10349         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10350         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10351         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10352
10353         if (pdev->msi_enabled || pdev->msix_enabled) {
10354                 name_msi_vectors(ioa_cfg);
10355                 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
10356                         ioa_cfg->vectors_info[0].desc,
10357                         &ioa_cfg->hrrq[0]);
10358                 if (!rc)
10359                         rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
10360         } else {
10361                 rc = request_irq(pdev->irq, ipr_isr,
10362                          IRQF_SHARED,
10363                          IPR_NAME, &ioa_cfg->hrrq[0]);
10364         }
10365         if (rc) {
10366                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10367                         pdev->irq, rc);
10368                 goto cleanup_nolog;
10369         }
10370
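        /*
         * Adapters flagged for PCI warm reset (and revision 0 Obsidian-E
         * cards) reset via slot reset from a dedicated WQ_MEM_RECLAIM
         * workqueue so resets can make progress under memory pressure.
         */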
10371         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10372             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10373                 ioa_cfg->needs_warm_reset = 1;
10374                 ioa_cfg->reset = ipr_reset_slot_reset;
10375
10376                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10377                                                                 WQ_MEM_RECLAIM, host->host_no);
10378
10379                 if (!ioa_cfg->reset_work_q) {
10380                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10381                         rc = -ENOMEM;
10382                         goto out_free_irq;
10383                 }
10384         } else
10385                 ioa_cfg->reset = ipr_reset_start_bist;
10386
10387         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10388         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10389         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10390
10391         LEAVE;
10392 out:
10393         return rc;
10394
10395 out_free_irq:
10396         ipr_free_irqs(ioa_cfg);
10397 cleanup_nolog:
10398         ipr_free_mem(ioa_cfg);
10399 out_msi_disable:
10400         ipr_wait_for_pci_err_recovery(ioa_cfg);
10401         pci_free_irq_vectors(pdev);
10402 cleanup_nomem:
10403         iounmap(ipr_regs);
10404 out_disable:
10405         pci_disable_device(pdev);
10406 out_release_regions:
10407         pci_release_regions(pdev);
10408 out_scsi_host_put:
10409         scsi_host_put(host);
10410         goto out;
10411 }
10412
10413 /**
10414  * ipr_initiate_ioa_bringdown - Bring down an adapter
10415  * @ioa_cfg:            ioa config struct
10416  * @shutdown_type:      shutdown type
10417  *
10418  * Description: This function will initiate bringing down the adapter.
10419  * This consists of issuing an IOA shutdown to the adapter
10420  * to flush the cache, and running BIST.
10421  * If the caller needs to wait on the completion of the reset,
10422  * the caller must sleep on the reset_wait_q.
10423  *
10424  * Return value:
10425  *      none
10426  **/
10427 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10428                                        enum ipr_shutdown_type shutdown_type)
10429 {
10430         ENTER;
10431         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10432                 ioa_cfg->sdt_state = ABORT_DUMP;
10433         ioa_cfg->reset_retries = 0;
10434         ioa_cfg->in_ioa_bringdown = 1;
10435         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10436         LEAVE;
10437 }
10438
10439 /**
10440  * __ipr_remove - Remove a single adapter
10441  * @pdev:       pci device struct
10442  *
10443  * Adapter hot plug remove entry point.
10444  *
10445  * Return value:
10446  *      none
10447  **/
10448 static void __ipr_remove(struct pci_dev *pdev)
10449 {
10450         unsigned long host_lock_flags = 0;
10451         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10452         int i;
10453         unsigned long driver_lock_flags;
10454         ENTER;
10455
10456         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10457         while (ioa_cfg->in_reset_reload) {
10458                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10459                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10460                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10461         }
10462
10463         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10464                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10465                 ioa_cfg->hrrq[i].removing_ioa = 1;
10466                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10467         }
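        /* Make the removing_ioa stores visible before starting the bringdown. */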
10468         wmb();
10469         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10470
10471         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10472         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10473         flush_work(&ioa_cfg->work_q);
10474         if (ioa_cfg->reset_work_q)
10475                 flush_workqueue(ioa_cfg->reset_work_q);
10476         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10477         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10478
10479         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10480         list_del(&ioa_cfg->queue);
10481         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10482
10483         if (ioa_cfg->sdt_state == ABORT_DUMP)
10484                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10485         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10486
10487         ipr_free_all_resources(ioa_cfg);
10488
10489         LEAVE;
10490 }
10491
10492 /**
10493  * ipr_remove - IOA hot plug remove entry point
10494  * @pdev:       pci device struct
10495  *
10496  * Adapter hot plug remove entry point.
10497  *
10498  * Return value:
10499  *      none
10500  **/
10501 static void ipr_remove(struct pci_dev *pdev)
10502 {
10503         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10504
10505         ENTER;
10506
10507         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10508                               &ipr_trace_attr);
10509         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10510                              &ipr_dump_attr);
10511         sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10512                         &ipr_ioa_async_err_log);
10513         scsi_remove_host(ioa_cfg->host);
10514
10515         __ipr_remove(pdev);
10516
10517         LEAVE;
10518 }
10519
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       PCI device struct
 * @dev_id:     PCI device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
10526 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10527 {
10528         struct ipr_ioa_cfg *ioa_cfg;
10529         unsigned long flags;
10530         int rc, i;
10531
10532         rc = ipr_probe_ioa(pdev, dev_id);
10533
10534         if (rc)
10535                 return rc;
10536
10537         ioa_cfg = pci_get_drvdata(pdev);
10538         rc = ipr_probe_ioa_part2(ioa_cfg);
10539
10540         if (rc) {
10541                 __ipr_remove(pdev);
10542                 return rc;
10543         }
10544
10545         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10546
10547         if (rc) {
10548                 __ipr_remove(pdev);
10549                 return rc;
10550         }
10551
10552         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10553                                    &ipr_trace_attr);
10554
10555         if (rc) {
10556                 scsi_remove_host(ioa_cfg->host);
10557                 __ipr_remove(pdev);
10558                 return rc;
10559         }
10560
10561         rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10562                         &ipr_ioa_async_err_log);
10563
10564         if (rc) {
10565                 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10566                                 &ipr_dump_attr);
10567                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10568                                 &ipr_trace_attr);
10569                 scsi_remove_host(ioa_cfg->host);
10570                 __ipr_remove(pdev);
10571                 return rc;
10572         }
10573
10574         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10575                                    &ipr_dump_attr);
10576
10577         if (rc) {
10578                 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10579                                       &ipr_ioa_async_err_log);
10580                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10581                                       &ipr_trace_attr);
10582                 scsi_remove_host(ioa_cfg->host);
10583                 __ipr_remove(pdev);
10584                 return rc;
10585         }
10586         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10587         ioa_cfg->scan_enabled = 1;
10588         schedule_work(&ioa_cfg->work_q);
10589         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10590
10591         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10592
10593         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10594                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10595                         irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
10596                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10597                 }
10598         }
10599
10600         scsi_scan_host(ioa_cfg->host);
10601
10602         return 0;
10603 }
10604
10605 /**
10606  * ipr_shutdown - Shutdown handler.
10607  * @pdev:       pci device struct
10608  *
10609  * This function is invoked upon system shutdown/reboot. It will issue
10610  * an adapter shutdown to the adapter to flush the write cache.
10611  *
10612  * Return value:
10613  *      none
10614  **/
10615 static void ipr_shutdown(struct pci_dev *pdev)
10616 {
10617         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10618         unsigned long lock_flags = 0;
10619         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10620         int i;
10621
10622         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10623         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10624                 ioa_cfg->iopoll_weight = 0;
10625                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10626                         irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
10627         }
10628
10629         while (ioa_cfg->in_reset_reload) {
10630                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10631                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10632                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10633         }
10634
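        /*
         * With ipr_fast_reboot set, SIS-64 adapters take the quiesce
         * shutdown path on restart to speed up reboot.
         */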
10635         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10636                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10637
10638         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10639         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10640         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10641         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10642                 ipr_free_irqs(ioa_cfg);
10643                 pci_disable_device(ioa_cfg->pdev);
10644         }
10645 }
10646
static const struct pci_device_id ipr_pci_table[] = {
10648         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10649                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10650         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10651                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10652         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10653                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10654         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10655                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10656         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10657                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10658         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10659                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10660         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10661                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10662         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10663                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10664                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10665         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10666               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10667         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10668               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10669               IPR_USE_LONG_TRANSOP_TIMEOUT },
10670         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10671               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10672               IPR_USE_LONG_TRANSOP_TIMEOUT },
10673         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10674               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10675         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10676               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10677               IPR_USE_LONG_TRANSOP_TIMEOUT},
10678         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10679               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10680               IPR_USE_LONG_TRANSOP_TIMEOUT },
10681         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10682               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10683               IPR_USE_LONG_TRANSOP_TIMEOUT },
10684         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10685               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10686         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10687               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10688         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10689               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10690               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10691         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10692                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10693         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10694                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10695         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10696                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10697                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10698         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10699                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10700                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10701         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10702                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10703         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10704                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10705         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10706                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10707         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10708                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10709         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10710                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10711         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10712                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10713         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10714                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10715         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10716                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10717         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10718                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10719         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10720                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10721         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10722                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10723         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10724                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10725         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10726                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10727         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10728                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10729         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10730                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10731         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10732                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10733         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10734                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10735         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10736                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10737         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10738                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10739         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10740                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10741         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10742                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10743         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10744                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10745         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10746                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10747         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10749         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10750                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10751         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10753         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10754                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10755         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10756                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10757         { }
10758 };
10759 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10760
10761 static const struct pci_error_handlers ipr_err_handler = {
10762         .error_detected = ipr_pci_error_detected,
10763         .mmio_enabled = ipr_pci_mmio_enabled,
10764         .slot_reset = ipr_pci_slot_reset,
10765 };
10766
10767 static struct pci_driver ipr_driver = {
10768         .name = IPR_NAME,
10769         .id_table = ipr_pci_table,
10770         .probe = ipr_probe,
10771         .remove = ipr_remove,
10772         .shutdown = ipr_shutdown,
10773         .err_handler = &ipr_err_handler,
10774 };
10775
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
10782 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10783 {
10784         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10785 }
10786
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         notifier block
 * @event:      reboot notifier event (SYS_RESTART, SYS_HALT or SYS_POWER_OFF)
 * @buf:        unused
 *
 * Return value:
 *      NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
10793 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10794 {
10795         struct ipr_cmnd *ipr_cmd;
10796         struct ipr_ioa_cfg *ioa_cfg;
10797         unsigned long flags = 0, driver_lock_flags;
10798
10799         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10800                 return NOTIFY_DONE;
10801
10802         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10803
10804         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10805                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10806                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10807                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10808                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10809                         continue;
10810                 }
10811
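                /*
                 * Issue a shutdown-prepare IOA command so the adapter
                 * flushes its write cache before the system goes down.
                 */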
10812                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10813                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10814                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10815                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10816                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10817
10818                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10819                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10820         }
10821         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10822
10823         return NOTIFY_OK;
10824 }
10825
static struct notifier_block ipr_notifier = {
        .notifier_call = ipr_halt,
};
10829
10830 /**
10831  * ipr_init - Module entry point
10832  *
10833  * Return value:
10834  *      0 on success / negative value on failure
10835  **/
10836 static int __init ipr_init(void)
10837 {
10838         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10839                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10840
10841         register_reboot_notifier(&ipr_notifier);
10842         return pci_register_driver(&ipr_driver);
10843 }
10844
10845 /**
10846  * ipr_exit - Module unload
10847  *
10848  * Module unload entry point.
10849  *
10850  * Return value:
10851  *      none
10852  **/
10853 static void __exit ipr_exit(void)
10854 {
10855         unregister_reboot_notifier(&ipr_notifier);
10856         pci_unregister_driver(&ipr_driver);
10857 }
10858
10859 module_init(ipr_init);
10860 module_exit(ipr_exit);
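
/*
 * Usage sketch (illustrative, not part of the driver): the module binds
 * to the IDs in ipr_pci_table when loaded, e.g.:
 *
 *   modprobe ipr debug=1
 *
 * The "debug" parameter name assumes the module_param_named()
 * declarations earlier in this file; the value shown is an example only.
 */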