/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
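		/* interrupt register offsets within the chip's MMIO space */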
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

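/* Indexed by the max_speed module parameter (0 = 80 MB/s, 1 = U160, 2 = U320) */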
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

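	/*
	 * The trace buffer is a fixed-size ring: atomically bump the shared
	 * index and wrap it modulo IPR_NUM_TRACE_ENTRIES so that concurrent
	 * callers each claim a distinct slot and old entries are overwritten.
	 */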
	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

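	/* Preserve the command's HRRQ assignment across the packet memset */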
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
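	/* Read back to flush the posted MMIO writes above */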
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, it calculates the ioarcb size required, then ORs the
 * appropriate size bits into the low-order bits of the ioarcb address.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
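		/*
		 * The ioarcb is aligned, so the low-order bits of its DMA
		 * address are free to carry the size encoding below.
		 */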
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

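/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Round-robins over queues 1..hrrq_num-1, keeping queue 0 (IPR_INIT_HRRQ)
 * reserved for initialization and other internal commands.
 *
 * Return value:
 *	hrrq index
 **/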
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	else
		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
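		/* CDB bytes 7-8 carry the HCAM buffer length, big endian */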
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

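		/*
		 * SIS64 resources have no fixed bus/target/lun, so a virtual
		 * target id is assigned: reuse the id of an existing resource
		 * with the same dev_id, otherwise claim the next free bit.
		 */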
		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

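	/* Renders e.g. "02-0A-01": two hex digits per path element,
	 * dash separated, stopping at the 0xff terminator. */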
	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (p - buffer));
1287         return buffer;
1288 }
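/*
 * Usage sketch (editor's note): this variant prefixes the Scsi_Host
 * number, mirroring the call in ipr_update_res_entry() below; for
 * host_no 2 and path { 0x00, 0x02, 0xff } the buffer would hold
 * "2/00-02":
 *
 *	ipr_format_res_path(ioa_cfg, res->res_path, buffer, sizeof(buffer));
 */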
1289
1290 /**
1291  * ipr_update_res_entry - Update the resource entry.
1292  * @res:        resource entry struct
1293  * @cfgtew:     config table entry wrapper struct
1294  *
1295  * Return value:
1296  *      none
1297  **/
1298 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1299                                  struct ipr_config_table_entry_wrapper *cfgtew)
1300 {
1301         char buffer[IPR_MAX_RES_PATH_LENGTH];
1302         unsigned int proto;
1303         int new_path = 0;
1304
1305         if (res->ioa_cfg->sis64) {
1306                 res->flags = cfgtew->u.cfgte64->flags;
1307                 res->res_flags = cfgtew->u.cfgte64->res_flags;
1308                 res->type = cfgtew->u.cfgte64->res_type;
1309
1310                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1311                         sizeof(struct ipr_std_inq_data));
1312
1313                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1314                 proto = cfgtew->u.cfgte64->proto;
1315                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1316                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1317
1318                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1319                         sizeof(res->dev_lun.scsi_lun));
1320
1321                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1322                                         sizeof(res->res_path))) {
1323                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1324                                 sizeof(res->res_path));
1325                         new_path = 1;
1326                 }
1327
1328                 if (res->sdev && new_path)
1329                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1330                                     ipr_format_res_path(res->ioa_cfg,
1331                                         res->res_path, buffer, sizeof(buffer)));
1332         } else {
1333                 res->flags = cfgtew->u.cfgte->flags;
1334                 if (res->flags & IPR_IS_IOA_RESOURCE)
1335                         res->type = IPR_RES_TYPE_IOAFP;
1336                 else
1337                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1338
1339                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1340                         sizeof(struct ipr_std_inq_data));
1341
1342                 res->qmodel = IPR_QUEUEING_MODEL(res);
1343                 proto = cfgtew->u.cfgte->proto;
1344                 res->res_handle = cfgtew->u.cfgte->res_handle;
1345         }
1346
1347         ipr_update_ata_class(res, proto);
1348 }
1349
1350 /**
1351  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1352  *                        for the resource.
1353  * @res:        resource entry struct
1355  *
1356  * Return value:
1357  *      none
1358  **/
1359 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1360 {
1361         struct ipr_resource_entry *gscsi_res = NULL;
1362         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1363
1364         if (!ioa_cfg->sis64)
1365                 return;
1366
1367         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1368                 clear_bit(res->target, ioa_cfg->array_ids);
1369         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1370                 clear_bit(res->target, ioa_cfg->vset_ids);
1371         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1372                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1373                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1374                                 return;
1375                 clear_bit(res->target, ioa_cfg->target_ids);
1376
1377         } else if (res->bus == 0)
1378                 clear_bit(res->target, ioa_cfg->target_ids);
1379 }
1380
1381 /**
1382  * ipr_handle_config_change - Handle a config change from the adapter
1383  * @ioa_cfg:    ioa config struct
1384  * @hostrcb:    hostrcb
1385  *
1386  * Return value:
1387  *      none
1388  **/
1389 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1390                                      struct ipr_hostrcb *hostrcb)
1391 {
1392         struct ipr_resource_entry *res = NULL;
1393         struct ipr_config_table_entry_wrapper cfgtew;
1394         __be32 cc_res_handle;
1395
1396         u32 is_ndn = 1;
1397
1398         if (ioa_cfg->sis64) {
1399                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1400                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1401         } else {
1402                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1403                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1404         }
1405
1406         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1407                 if (res->res_handle == cc_res_handle) {
1408                         is_ndn = 0;
1409                         break;
1410                 }
1411         }
1412
1413         if (is_ndn) {
1414                 if (list_empty(&ioa_cfg->free_res_q)) {
1415                         ipr_send_hcam(ioa_cfg,
1416                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1417                                       hostrcb);
1418                         return;
1419                 }
1420
1421                 res = list_entry(ioa_cfg->free_res_q.next,
1422                                  struct ipr_resource_entry, queue);
1423
1424                 list_del(&res->queue);
1425                 ipr_init_res_entry(res, &cfgtew);
1426                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1427         }
1428
1429         ipr_update_res_entry(res, &cfgtew);
1430
1431         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1432                 if (res->sdev) {
1433                         res->del_from_ml = 1;
1434                         res->res_handle = IPR_INVALID_RES_HANDLE;
1435                         schedule_work(&ioa_cfg->work_q);
1436                 } else {
1437                         ipr_clear_res_target(res);
1438                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1439                 }
1440         } else if (!res->sdev || res->del_from_ml) {
1441                 res->add_to_ml = 1;
1442                 schedule_work(&ioa_cfg->work_q);
1443         }
1444
1445         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1446 }
1447
1448 /**
1449  * ipr_process_ccn - Op done function for a CCN.
1450  * @ipr_cmd:    ipr command struct
1451  *
1452  * This function is the op done function for a configuration change
1453  * notification host controlled async message (HCAM) from the adapter.
1454  *
1455  * Return value:
1456  *      none
1457  **/
1458 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1459 {
1460         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1461         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1462         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1463
1464         list_del(&hostrcb->queue);
1465         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1466
1467         if (ioasc) {
1468                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1469                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1470                         dev_err(&ioa_cfg->pdev->dev,
1471                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1472
1473                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1474         } else {
1475                 ipr_handle_config_change(ioa_cfg, hostrcb);
1476         }
1477 }
1478
1479 /**
1480  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1481  * @i:          index into buffer
1482  * @buf:                string to modify
1483  *
1484  * This function will strip all trailing whitespace, pad the end
1485  * of the string with a single space, and NULL terminate the string.
1486  *
1487  * Return value:
1488  *      new length of string
1489  **/
1490 static int strip_and_pad_whitespace(int i, char *buf)
1491 {
1492         while (i && buf[i] == ' ')
1493                 i--;
1494         buf[i+1] = ' ';
1495         buf[i+2] = '\0';
1496         return i + 2;
1497 }
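/*
 * Worked example (editor's note): given the 8-byte padded vendor ID
 * "IBM     " and i = IPR_VENDOR_ID_LEN - 1 (7), the loop backs up to
 * the 'M', writes one space and a terminator, and returns 4:
 *
 *	i = strip_and_pad_whitespace(7, buf);	// buf = "IBM ", i = 4
 *
 * so the next field can be copied in at buf[i].
 */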
1498
1499 /**
1500  * ipr_log_vpd_compact - Log the passed VPD compactly.
1501  * @prefix:             string to print at start of printk
1502  * @hostrcb:    hostrcb pointer
1503  * @vpd:                vendor/product id/sn struct
1504  *
1505  * Return value:
1506  *      none
1507  **/
1508 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1509                                 struct ipr_vpd *vpd)
1510 {
1511         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1512         int i = 0;
1513
1514         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1515         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1516
1517         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1518         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1519
1520         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1521         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1522
1523         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1524 }
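/*
 * Output sketch (editor's note, made-up IDs): with prefix "Remote IOA",
 * vendor "IBM", product "57B8" and serial "01234567", the logged
 * message body would be:
 *
 *	Remote IOA VPID/SN: IBM 57B8 01234567
 */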
1525
1526 /**
1527  * ipr_log_vpd - Log the passed VPD to the error log.
1528  * @vpd:                vendor/product id/sn struct
1529  *
1530  * Return value:
1531  *      none
1532  **/
1533 static void ipr_log_vpd(struct ipr_vpd *vpd)
1534 {
1535         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1536                     + IPR_SERIAL_NUM_LEN];
1537
1538         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1539         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1540                IPR_PROD_ID_LEN);
1541         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1542         ipr_err("Vendor/Product ID: %s\n", buffer);
1543
1544         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1545         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1546         ipr_err("    Serial Number: %s\n", buffer);
1547 }
1548
1549 /**
1550  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1551  * @prefix:             string to print at start of printk
1552  * @hostrcb:    hostrcb pointer
1553  * @vpd:                vendor/product id/sn/wwn struct
1554  *
1555  * Return value:
1556  *      none
1557  **/
1558 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1559                                     struct ipr_ext_vpd *vpd)
1560 {
1561         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1562         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1563                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1564 }
1565
1566 /**
1567  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1568  * @vpd:                vendor/product id/sn/wwn struct
1569  *
1570  * Return value:
1571  *      none
1572  **/
1573 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1574 {
1575         ipr_log_vpd(&vpd->vpd);
1576         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1577                 be32_to_cpu(vpd->wwid[1]));
1578 }
1579
1580 /**
1581  * ipr_log_enhanced_cache_error - Log a cache error.
1582  * @ioa_cfg:    ioa config struct
1583  * @hostrcb:    hostrcb struct
1584  *
1585  * Return value:
1586  *      none
1587  **/
1588 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1589                                          struct ipr_hostrcb *hostrcb)
1590 {
1591         struct ipr_hostrcb_type_12_error *error;
1592
1593         if (ioa_cfg->sis64)
1594                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1595         else
1596                 error = &hostrcb->hcam.u.error.u.type_12_error;
1597
1598         ipr_err("-----Current Configuration-----\n");
1599         ipr_err("Cache Directory Card Information:\n");
1600         ipr_log_ext_vpd(&error->ioa_vpd);
1601         ipr_err("Adapter Card Information:\n");
1602         ipr_log_ext_vpd(&error->cfc_vpd);
1603
1604         ipr_err("-----Expected Configuration-----\n");
1605         ipr_err("Cache Directory Card Information:\n");
1606         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1607         ipr_err("Adapter Card Information:\n");
1608         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1609
1610         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1611                      be32_to_cpu(error->ioa_data[0]),
1612                      be32_to_cpu(error->ioa_data[1]),
1613                      be32_to_cpu(error->ioa_data[2]));
1614 }
1615
1616 /**
1617  * ipr_log_cache_error - Log a cache error.
1618  * @ioa_cfg:    ioa config struct
1619  * @hostrcb:    hostrcb struct
1620  *
1621  * Return value:
1622  *      none
1623  **/
1624 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1625                                 struct ipr_hostrcb *hostrcb)
1626 {
1627         struct ipr_hostrcb_type_02_error *error =
1628                 &hostrcb->hcam.u.error.u.type_02_error;
1629
1630         ipr_err("-----Current Configuration-----\n");
1631         ipr_err("Cache Directory Card Information:\n");
1632         ipr_log_vpd(&error->ioa_vpd);
1633         ipr_err("Adapter Card Information:\n");
1634         ipr_log_vpd(&error->cfc_vpd);
1635
1636         ipr_err("-----Expected Configuration-----\n");
1637         ipr_err("Cache Directory Card Information:\n");
1638         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1639         ipr_err("Adapter Card Information:\n");
1640         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1641
1642         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1643                      be32_to_cpu(error->ioa_data[0]),
1644                      be32_to_cpu(error->ioa_data[1]),
1645                      be32_to_cpu(error->ioa_data[2]));
1646 }
1647
1648 /**
1649  * ipr_log_enhanced_config_error - Log a configuration error.
1650  * @ioa_cfg:    ioa config struct
1651  * @hostrcb:    hostrcb struct
1652  *
1653  * Return value:
1654  *      none
1655  **/
1656 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1657                                           struct ipr_hostrcb *hostrcb)
1658 {
1659         int errors_logged, i;
1660         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1661         struct ipr_hostrcb_type_13_error *error;
1662
1663         error = &hostrcb->hcam.u.error.u.type_13_error;
1664         errors_logged = be32_to_cpu(error->errors_logged);
1665
1666         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1667                 be32_to_cpu(error->errors_detected), errors_logged);
1668
1669         dev_entry = error->dev;
1670
1671         for (i = 0; i < errors_logged; i++, dev_entry++) {
1672                 ipr_err_separator;
1673
1674                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1675                 ipr_log_ext_vpd(&dev_entry->vpd);
1676
1677                 ipr_err("-----New Device Information-----\n");
1678                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1679
1680                 ipr_err("Cache Directory Card Information:\n");
1681                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1682
1683                 ipr_err("Adapter Card Information:\n");
1684                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1685         }
1686 }
1687
1688 /**
1689  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1690  * @ioa_cfg:    ioa config struct
1691  * @hostrcb:    hostrcb struct
1692  *
1693  * Return value:
1694  *      none
1695  **/
1696 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1697                                        struct ipr_hostrcb *hostrcb)
1698 {
1699         int errors_logged, i;
1700         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1701         struct ipr_hostrcb_type_23_error *error;
1702         char buffer[IPR_MAX_RES_PATH_LENGTH];
1703
1704         error = &hostrcb->hcam.u.error64.u.type_23_error;
1705         errors_logged = be32_to_cpu(error->errors_logged);
1706
1707         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1708                 be32_to_cpu(error->errors_detected), errors_logged);
1709
1710         dev_entry = error->dev;
1711
1712         for (i = 0; i < errors_logged; i++, dev_entry++) {
1713                 ipr_err_separator;
1714
1715                 ipr_err("Device %d : %s", i + 1,
1716                         __ipr_format_res_path(dev_entry->res_path,
1717                                               buffer, sizeof(buffer)));
1718                 ipr_log_ext_vpd(&dev_entry->vpd);
1719
1720                 ipr_err("-----New Device Information-----\n");
1721                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1722
1723                 ipr_err("Cache Directory Card Information:\n");
1724                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1725
1726                 ipr_err("Adapter Card Information:\n");
1727                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1728         }
1729 }
1730
1731 /**
1732  * ipr_log_config_error - Log a configuration error.
1733  * @ioa_cfg:    ioa config struct
1734  * @hostrcb:    hostrcb struct
1735  *
1736  * Return value:
1737  *      none
1738  **/
1739 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1740                                  struct ipr_hostrcb *hostrcb)
1741 {
1742         int errors_logged, i;
1743         struct ipr_hostrcb_device_data_entry *dev_entry;
1744         struct ipr_hostrcb_type_03_error *error;
1745
1746         error = &hostrcb->hcam.u.error.u.type_03_error;
1747         errors_logged = be32_to_cpu(error->errors_logged);
1748
1749         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1750                 be32_to_cpu(error->errors_detected), errors_logged);
1751
1752         dev_entry = error->dev;
1753
1754         for (i = 0; i < errors_logged; i++, dev_entry++) {
1755                 ipr_err_separator;
1756
1757                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1758                 ipr_log_vpd(&dev_entry->vpd);
1759
1760                 ipr_err("-----New Device Information-----\n");
1761                 ipr_log_vpd(&dev_entry->new_vpd);
1762
1763                 ipr_err("Cache Directory Card Information:\n");
1764                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1765
1766                 ipr_err("Adapter Card Information:\n");
1767                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1768
1769                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1770                         be32_to_cpu(dev_entry->ioa_data[0]),
1771                         be32_to_cpu(dev_entry->ioa_data[1]),
1772                         be32_to_cpu(dev_entry->ioa_data[2]),
1773                         be32_to_cpu(dev_entry->ioa_data[3]),
1774                         be32_to_cpu(dev_entry->ioa_data[4]));
1775         }
1776 }
1777
1778 /**
1779  * ipr_log_enhanced_array_error - Log an array configuration error.
1780  * @ioa_cfg:    ioa config struct
1781  * @hostrcb:    hostrcb struct
1782  *
1783  * Return value:
1784  *      none
1785  **/
1786 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1787                                          struct ipr_hostrcb *hostrcb)
1788 {
1789         int i, num_entries;
1790         struct ipr_hostrcb_type_14_error *error;
1791         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1792         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1793
1794         error = &hostrcb->hcam.u.error.u.type_14_error;
1795
1796         ipr_err_separator;
1797
1798         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1799                 error->protection_level,
1800                 ioa_cfg->host->host_no,
1801                 error->last_func_vset_res_addr.bus,
1802                 error->last_func_vset_res_addr.target,
1803                 error->last_func_vset_res_addr.lun);
1804
1805         ipr_err_separator;
1806
1807         array_entry = error->array_member;
1808         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1809                             ARRAY_SIZE(error->array_member));
1810
1811         for (i = 0; i < num_entries; i++, array_entry++) {
1812                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1813                         continue;
1814
1815                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1816                         ipr_err("Exposed Array Member %d:\n", i);
1817                 else
1818                         ipr_err("Array Member %d:\n", i);
1819
1820                 ipr_log_ext_vpd(&array_entry->vpd);
1821                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1822                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1823                                  "Expected Location");
1824
1825                 ipr_err_separator;
1826         }
1827 }
1828
1829 /**
1830  * ipr_log_array_error - Log an array configuration error.
1831  * @ioa_cfg:    ioa config struct
1832  * @hostrcb:    hostrcb struct
1833  *
1834  * Return value:
1835  *      none
1836  **/
1837 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1838                                 struct ipr_hostrcb *hostrcb)
1839 {
1840         int i;
1841         struct ipr_hostrcb_type_04_error *error;
1842         struct ipr_hostrcb_array_data_entry *array_entry;
1843         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1844
1845         error = &hostrcb->hcam.u.error.u.type_04_error;
1846
1847         ipr_err_separator;
1848
1849         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1850                 error->protection_level,
1851                 ioa_cfg->host->host_no,
1852                 error->last_func_vset_res_addr.bus,
1853                 error->last_func_vset_res_addr.target,
1854                 error->last_func_vset_res_addr.lun);
1855
1856         ipr_err_separator;
1857
1858         array_entry = error->array_member;
1859
1860         for (i = 0; i < 18; i++) {
1861                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1862                         continue;
1863
1864                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1865                         ipr_err("Exposed Array Member %d:\n", i);
1866                 else
1867                         ipr_err("Array Member %d:\n", i);
1868
1869                 ipr_log_vpd(&array_entry->vpd);
1870
1871                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1872                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1873                                  "Expected Location");
1874
1875                 ipr_err_separator;
1876
1877                 if (i == 9)
1878                         array_entry = error->array_member2;
1879                 else
1880                         array_entry++;
1881         }
1882 }
1883
1884 /**
1885  * ipr_log_hex_data - Log additional hex IOA error data.
1886  * @ioa_cfg:    ioa config struct
1887  * @data:               IOA error data
1888  * @len:                data length
1889  *
1890  * Return value:
1891  *      none
1892  **/
1893 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1894 {
1895         int i;
1896
1897         if (len == 0)
1898                 return;
1899
1900         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1901                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1902
1903         for (i = 0; i < len / 4; i += 4) {
1904                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1905                         be32_to_cpu(data[i]),
1906                         be32_to_cpu(data[i+1]),
1907                         be32_to_cpu(data[i+2]),
1908                         be32_to_cpu(data[i+3]));
1909         }
1910 }
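/*
 * Output sketch (editor's note): i indexes 32-bit words and advances
 * four per line, while the leading "%08X" offset is printed in bytes,
 * so a 32-byte buffer dumps as two lines:
 *
 *	00000000: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
 *	00000010: xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
 */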
1911
1912 /**
1913  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1914  * @ioa_cfg:    ioa config struct
1915  * @hostrcb:    hostrcb struct
1916  *
1917  * Return value:
1918  *      none
1919  **/
1920 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1921                                             struct ipr_hostrcb *hostrcb)
1922 {
1923         struct ipr_hostrcb_type_17_error *error;
1924
1925         if (ioa_cfg->sis64)
1926                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1927         else
1928                 error = &hostrcb->hcam.u.error.u.type_17_error;
1929
1930         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1931         strim(error->failure_reason);
1932
1933         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1934                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1935         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1936         ipr_log_hex_data(ioa_cfg, error->data,
1937                          be32_to_cpu(hostrcb->hcam.length) -
1938                          (offsetof(struct ipr_hostrcb_error, u) +
1939                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1940 }
1941
1942 /**
1943  * ipr_log_dual_ioa_error - Log a dual adapter error.
1944  * @ioa_cfg:    ioa config struct
1945  * @hostrcb:    hostrcb struct
1946  *
1947  * Return value:
1948  *      none
1949  **/
1950 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1951                                    struct ipr_hostrcb *hostrcb)
1952 {
1953         struct ipr_hostrcb_type_07_error *error;
1954
1955         error = &hostrcb->hcam.u.error.u.type_07_error;
1956         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1957         strim(error->failure_reason);
1958
1959         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1960                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1961         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1962         ipr_log_hex_data(ioa_cfg, error->data,
1963                          be32_to_cpu(hostrcb->hcam.length) -
1964                          (offsetof(struct ipr_hostrcb_error, u) +
1965                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1966 }
1967
1968 static const struct {
1969         u8 active;
1970         char *desc;
1971 } path_active_desc[] = {
1972         { IPR_PATH_NO_INFO, "Path" },
1973         { IPR_PATH_ACTIVE, "Active path" },
1974         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1975 };
1976
1977 static const struct {
1978         u8 state;
1979         char *desc;
1980 } path_state_desc[] = {
1981         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1982         { IPR_PATH_HEALTHY, "is healthy" },
1983         { IPR_PATH_DEGRADED, "is degraded" },
1984         { IPR_PATH_FAILED, "is failed" }
1985 };
1986
1987 /**
1988  * ipr_log_fabric_path - Log a fabric path error
1989  * @hostrcb:    hostrcb struct
1990  * @fabric:             fabric descriptor
1991  *
1992  * Return value:
1993  *      none
1994  **/
1995 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1996                                 struct ipr_hostrcb_fabric_desc *fabric)
1997 {
1998         int i, j;
1999         u8 path_state = fabric->path_state;
2000         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2001         u8 state = path_state & IPR_PATH_STATE_MASK;
2002
2003         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2004                 if (path_active_desc[i].active != active)
2005                         continue;
2006
2007                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2008                         if (path_state_desc[j].state != state)
2009                                 continue;
2010
2011                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2012                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2013                                              path_active_desc[i].desc, path_state_desc[j].desc,
2014                                              fabric->ioa_port);
2015                         } else if (fabric->cascaded_expander == 0xff) {
2016                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2017                                              path_active_desc[i].desc, path_state_desc[j].desc,
2018                                              fabric->ioa_port, fabric->phy);
2019                         } else if (fabric->phy == 0xff) {
2020                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2021                                              path_active_desc[i].desc, path_state_desc[j].desc,
2022                                              fabric->ioa_port, fabric->cascaded_expander);
2023                         } else {
2024                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2025                                              path_active_desc[i].desc, path_state_desc[j].desc,
2026                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2027                         }
2028                         return;
2029                 }
2030         }
2031
2032         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2033                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2034 }
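/*
 * Output sketch (editor's note): a healthy, active path through IOA
 * port 0, cascaded expander 1, phy 2 combines the two descriptor
 * tables above into:
 *
 *	Active path is healthy: IOA Port=0, Cascade=1, Phy=2
 *
 * with the Cascade/Phy fields dropped when they read 0xff.
 */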
2035
2036 /**
2037  * ipr_log64_fabric_path - Log a fabric path error
2038  * @hostrcb:    hostrcb struct
2039  * @fabric:             fabric descriptor
2040  *
2041  * Return value:
2042  *      none
2043  **/
2044 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2045                                   struct ipr_hostrcb64_fabric_desc *fabric)
2046 {
2047         int i, j;
2048         u8 path_state = fabric->path_state;
2049         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2050         u8 state = path_state & IPR_PATH_STATE_MASK;
2051         char buffer[IPR_MAX_RES_PATH_LENGTH];
2052
2053         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2054                 if (path_active_desc[i].active != active)
2055                         continue;
2056
2057                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2058                         if (path_state_desc[j].state != state)
2059                                 continue;
2060
2061                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2062                                      path_active_desc[i].desc, path_state_desc[j].desc,
2063                                      ipr_format_res_path(hostrcb->ioa_cfg,
2064                                                 fabric->res_path,
2065                                                 buffer, sizeof(buffer)));
2066                         return;
2067                 }
2068         }
2069
2070         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2071                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2072                                     buffer, sizeof(buffer)));
2073 }
2074
2075 static const struct {
2076         u8 type;
2077         char *desc;
2078 } path_type_desc[] = {
2079         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2080         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2081         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2082         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2083 };
2084
2085 static const struct {
2086         u8 status;
2087         char *desc;
2088 } path_status_desc[] = {
2089         { IPR_PATH_CFG_NO_PROB, "Functional" },
2090         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2091         { IPR_PATH_CFG_FAILED, "Failed" },
2092         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2093         { IPR_PATH_NOT_DETECTED, "Missing" },
2094         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2095 };
2096
2097 static const char *link_rate[] = {
2098         "unknown",
2099         "disabled",
2100         "phy reset problem",
2101         "spinup hold",
2102         "port selector",
2103         "unknown",
2104         "unknown",
2105         "unknown",
2106         "1.5Gbps",
2107         "3.0Gbps",
2108         "unknown",
2109         "unknown",
2110         "unknown",
2111         "unknown",
2112         "unknown",
2113         "unknown"
2114 };
2115
2116 /**
2117  * ipr_log_path_elem - Log a fabric path element.
2118  * @hostrcb:    hostrcb struct
2119  * @cfg:                fabric path element struct
2120  *
2121  * Return value:
2122  *      none
2123  **/
2124 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2125                               struct ipr_hostrcb_config_element *cfg)
2126 {
2127         int i, j;
2128         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2129         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2130
2131         if (type == IPR_PATH_CFG_NOT_EXIST)
2132                 return;
2133
2134         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2135                 if (path_type_desc[i].type != type)
2136                         continue;
2137
2138                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2139                         if (path_status_desc[j].status != status)
2140                                 continue;
2141
2142                         if (type == IPR_PATH_CFG_IOA_PORT) {
2143                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2144                                              path_status_desc[j].desc, path_type_desc[i].desc,
2145                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2146                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2147                         } else {
2148                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2149                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2150                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2151                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2152                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2153                                 } else if (cfg->cascaded_expander == 0xff) {
2154                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2155                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2156                                                      path_type_desc[i].desc, cfg->phy,
2157                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2158                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2159                                 } else if (cfg->phy == 0xff) {
2160                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2161                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2162                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2163                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2164                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2165                                 } else {
2166                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2167                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2168                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2169                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2170                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2171                                 }
2172                         }
2173                         return;
2174                 }
2175         }
2176
2177         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2178                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2179                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2180                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2181 }
2182
2183 /**
2184  * ipr_log64_path_elem - Log a fabric path element.
2185  * @hostrcb:    hostrcb struct
2186  * @cfg:                fabric path element struct
2187  *
2188  * Return value:
2189  *      none
2190  **/
2191 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2192                                 struct ipr_hostrcb64_config_element *cfg)
2193 {
2194         int i, j;
2195         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2196         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2197         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2198         char buffer[IPR_MAX_RES_PATH_LENGTH];
2199
2200         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2201                 return;
2202
2203         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2204                 if (path_type_desc[i].type != type)
2205                         continue;
2206
2207                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2208                         if (path_status_desc[j].status != status)
2209                                 continue;
2210
2211                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2212                                      path_status_desc[j].desc, path_type_desc[i].desc,
2213                                      ipr_format_res_path(hostrcb->ioa_cfg,
2214                                         cfg->res_path, buffer, sizeof(buffer)),
2215                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2216                                         be32_to_cpu(cfg->wwid[0]),
2217                                         be32_to_cpu(cfg->wwid[1]));
2218                         return;
2219                 }
2220         }
2221         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2222                      "WWN=%08X%08X\n", cfg->type_status,
2223                      ipr_format_res_path(hostrcb->ioa_cfg,
2224                         cfg->res_path, buffer, sizeof(buffer)),
2225                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2226                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2227 }
2228
2229 /**
2230  * ipr_log_fabric_error - Log a fabric error.
2231  * @ioa_cfg:    ioa config struct
2232  * @hostrcb:    hostrcb struct
2233  *
2234  * Return value:
2235  *      none
2236  **/
2237 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2238                                  struct ipr_hostrcb *hostrcb)
2239 {
2240         struct ipr_hostrcb_type_20_error *error;
2241         struct ipr_hostrcb_fabric_desc *fabric;
2242         struct ipr_hostrcb_config_element *cfg;
2243         int i, add_len;
2244
2245         error = &hostrcb->hcam.u.error.u.type_20_error;
2246         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2247         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2248
2249         add_len = be32_to_cpu(hostrcb->hcam.length) -
2250                 (offsetof(struct ipr_hostrcb_error, u) +
2251                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2252
2253         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2254                 ipr_log_fabric_path(hostrcb, fabric);
2255                 for_each_fabric_cfg(fabric, cfg)
2256                         ipr_log_path_elem(hostrcb, cfg);
2257
2258                 add_len -= be16_to_cpu(fabric->length);
2259                 fabric = (struct ipr_hostrcb_fabric_desc *)
2260                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2261         }
2262
2263         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2264 }
2265
2266 /**
2267  * ipr_log_sis64_array_error - Log a sis64 array error.
2268  * @ioa_cfg:    ioa config struct
2269  * @hostrcb:    hostrcb struct
2270  *
2271  * Return value:
2272  *      none
2273  **/
2274 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2275                                       struct ipr_hostrcb *hostrcb)
2276 {
2277         int i, num_entries;
2278         struct ipr_hostrcb_type_24_error *error;
2279         struct ipr_hostrcb64_array_data_entry *array_entry;
2280         char buffer[IPR_MAX_RES_PATH_LENGTH];
2281         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2282
2283         error = &hostrcb->hcam.u.error64.u.type_24_error;
2284
2285         ipr_err_separator;
2286
2287         ipr_err("RAID %s Array Configuration: %s\n",
2288                 error->protection_level,
2289                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2290                         buffer, sizeof(buffer)));
2291
2292         ipr_err_separator;
2293
2294         array_entry = error->array_member;
2295         num_entries = min_t(u32, error->num_entries,
2296                             ARRAY_SIZE(error->array_member));
2297
2298         for (i = 0; i < num_entries; i++, array_entry++) {
2299
2300                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2301                         continue;
2302
2303                 if (error->exposed_mode_adn == i)
2304                         ipr_err("Exposed Array Member %d:\n", i);
2305                 else
2306                         ipr_err("Array Member %d:\n", i);
2307
2308                 ipr_err("Array Member %d:\n", i);
2309                 ipr_log_ext_vpd(&array_entry->vpd);
2310                 ipr_err("Current Location: %s\n",
2311                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2312                                 buffer, sizeof(buffer)));
2313                 ipr_err("Expected Location: %s\n",
2314                          ipr_format_res_path(ioa_cfg,
2315                                 array_entry->expected_res_path,
2316                                 buffer, sizeof(buffer)));
2317
2318                 ipr_err_separator;
2319         }
2320 }
2321
2322 /**
2323  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2324  * @ioa_cfg:    ioa config struct
2325  * @hostrcb:    hostrcb struct
2326  *
2327  * Return value:
2328  *      none
2329  **/
2330 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2331                                        struct ipr_hostrcb *hostrcb)
2332 {
2333         struct ipr_hostrcb_type_30_error *error;
2334         struct ipr_hostrcb64_fabric_desc *fabric;
2335         struct ipr_hostrcb64_config_element *cfg;
2336         int i, add_len;
2337
2338         error = &hostrcb->hcam.u.error64.u.type_30_error;
2339
2340         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2341         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2342
2343         add_len = be32_to_cpu(hostrcb->hcam.length) -
2344                 (offsetof(struct ipr_hostrcb64_error, u) +
2345                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2346
2347         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2348                 ipr_log64_fabric_path(hostrcb, fabric);
2349                 for_each_fabric_cfg(fabric, cfg)
2350                         ipr_log64_path_elem(hostrcb, cfg);
2351
2352                 add_len -= be16_to_cpu(fabric->length);
2353                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2354                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2355         }
2356
2357         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2358 }
2359
2360 /**
2361  * ipr_log_generic_error - Log an adapter error.
2362  * @ioa_cfg:    ioa config struct
2363  * @hostrcb:    hostrcb struct
2364  *
2365  * Return value:
2366  *      none
2367  **/
2368 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2369                                   struct ipr_hostrcb *hostrcb)
2370 {
2371         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2372                          be32_to_cpu(hostrcb->hcam.length));
2373 }
2374
2375 /**
2376  * ipr_log_sis64_device_error - Log a sis64 device error.
2377  * @ioa_cfg:    ioa config struct
2378  * @hostrcb:    hostrcb struct
2379  *
2380  * Return value:
2381  *      none
2382  **/
2383 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2384                                          struct ipr_hostrcb *hostrcb)
2385 {
2386         struct ipr_hostrcb_type_21_error *error;
2387         char buffer[IPR_MAX_RES_PATH_LENGTH];
2388
2389         error = &hostrcb->hcam.u.error64.u.type_21_error;
2390
2391         ipr_err("-----Failing Device Information-----\n");
2392         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2393                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2394                 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2395         ipr_err("Device Resource Path: %s\n",
2396                 __ipr_format_res_path(error->res_path,
2397                                       buffer, sizeof(buffer)));
2398         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2399         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2400         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2401         ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2402         ipr_err("SCSI Sense Data:\n");
2403         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2404         ipr_err("SCSI Command Descriptor Block: \n");
2405         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2406
2407         ipr_err("Additional IOA Data:\n");
2408         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2409 }
2410
2411 /**
2412  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2413  * @ioasc:      IOASC
2414  *
2415  * This function will return the index into the ipr_error_table
2416  * for the specified IOASC. If the IOASC is not in the table,
2417  * 0 will be returned, which points to the entry used for unknown errors.
2418  *
2419  * Return value:
2420  *      index into the ipr_error_table
2421  **/
2422 static u32 ipr_get_error(u32 ioasc)
2423 {
2424         int i;
2425
2426         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2427                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2428                         return i;
2429
2430         return 0;
2431 }
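/*
 * Usage sketch (editor's note): callers treat the returned index as
 * always valid, since index 0 is the catch-all entry:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (ipr_error_table[error_index].log_hcam)
 *		ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 */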
2432
2433 /**
2434  * ipr_handle_log_data - Log an adapter error.
2435  * @ioa_cfg:    ioa config struct
2436  * @hostrcb:    hostrcb struct
2437  *
2438  * This function logs an adapter error to the system.
2439  *
2440  * Return value:
2441  *      none
2442  **/
2443 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2444                                 struct ipr_hostrcb *hostrcb)
2445 {
2446         u32 ioasc;
2447         int error_index;
2448         struct ipr_hostrcb_type_21_error *error;
2449
2450         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2451                 return;
2452
2453         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2454                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2455
2456         if (ioa_cfg->sis64)
2457                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2458         else
2459                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2460
2461         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2462             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2463                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2464                 scsi_report_bus_reset(ioa_cfg->host,
2465                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2466         }
2467
2468         error_index = ipr_get_error(ioasc);
2469
2470         if (!ipr_error_table[error_index].log_hcam)
2471                 return;
2472
2473         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2474             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2475                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2476
2477                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2478                     ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2479                         return;
2480         }
2481
2482         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2483
2484         /* Set indication we have logged an error */
2485         ioa_cfg->errors_logged++;
2486
2487         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2488                 return;
2489         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2490                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2491
2492         switch (hostrcb->hcam.overlay_id) {
2493         case IPR_HOST_RCB_OVERLAY_ID_2:
2494                 ipr_log_cache_error(ioa_cfg, hostrcb);
2495                 break;
2496         case IPR_HOST_RCB_OVERLAY_ID_3:
2497                 ipr_log_config_error(ioa_cfg, hostrcb);
2498                 break;
2499         case IPR_HOST_RCB_OVERLAY_ID_4:
2500         case IPR_HOST_RCB_OVERLAY_ID_6:
2501                 ipr_log_array_error(ioa_cfg, hostrcb);
2502                 break;
2503         case IPR_HOST_RCB_OVERLAY_ID_7:
2504                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2505                 break;
2506         case IPR_HOST_RCB_OVERLAY_ID_12:
2507                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2508                 break;
2509         case IPR_HOST_RCB_OVERLAY_ID_13:
2510                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2511                 break;
2512         case IPR_HOST_RCB_OVERLAY_ID_14:
2513         case IPR_HOST_RCB_OVERLAY_ID_16:
2514                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2515                 break;
2516         case IPR_HOST_RCB_OVERLAY_ID_17:
2517                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2518                 break;
2519         case IPR_HOST_RCB_OVERLAY_ID_20:
2520                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2521                 break;
2522         case IPR_HOST_RCB_OVERLAY_ID_21:
2523                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2524                 break;
2525         case IPR_HOST_RCB_OVERLAY_ID_23:
2526                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2527                 break;
2528         case IPR_HOST_RCB_OVERLAY_ID_24:
2529         case IPR_HOST_RCB_OVERLAY_ID_26:
2530                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2531                 break;
2532         case IPR_HOST_RCB_OVERLAY_ID_30:
2533                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2534                 break;
2535         case IPR_HOST_RCB_OVERLAY_ID_1:
2536         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2537         default:
2538                 ipr_log_generic_error(ioa_cfg, hostrcb);
2539                 break;
2540         }
2541 }
2542
2543 /**
2544  * ipr_process_error - Op done function for an adapter error log.
2545  * @ipr_cmd:    ipr command struct
2546  *
2547  * This function is the op done function for an error log host
2548  * controlled async message (HCAM) from the adapter. It will log the error and
2549  * send the HCAM back to the adapter.
2550  *
2551  * Return value:
2552  *      none
2553  **/
2554 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2555 {
2556         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2557         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2558         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2559         u32 fd_ioasc;
2560
2561         if (ioa_cfg->sis64)
2562                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2563         else
2564                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2565
2566         list_del(&hostrcb->queue);
2567         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2568
2569         if (!ioasc) {
2570                 ipr_handle_log_data(ioa_cfg, hostrcb);
2571                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2572                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2573         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2574                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2575                 dev_err(&ioa_cfg->pdev->dev,
2576                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2577         }
2578
2579         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2580 }
2581
2582 /**
2583  * ipr_timeout -  An internally generated op has timed out.
2584  * @ipr_cmd:    ipr command struct
2585  *
2586  * This function blocks host requests and initiates an
2587  * adapter reset.
2588  *
2589  * Return value:
2590  *      none
2591  **/
2592 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2593 {
2594         unsigned long lock_flags = 0;
2595         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2596
2597         ENTER;
2598         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2599
2600         ioa_cfg->errors_logged++;
2601         dev_err(&ioa_cfg->pdev->dev,
2602                 "Adapter being reset due to command timeout.\n");
2603
2604         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2605                 ioa_cfg->sdt_state = GET_DUMP;
2606
2607         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2608                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2609
2610         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2611         LEAVE;
2612 }
2613
2614 /**
2615  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2616  * @ipr_cmd:    ipr command struct
2617  *
2618  * This function blocks host requests and initiates an
2619  * adapter reset.
2620  *
2621  * Return value:
2622  *      none
2623  **/
2624 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2625 {
2626         unsigned long lock_flags = 0;
2627         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628
2629         ENTER;
2630         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2631
2632         ioa_cfg->errors_logged++;
2633         dev_err(&ioa_cfg->pdev->dev,
2634                 "Adapter timed out transitioning to operational.\n");
2635
2636         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2637                 ioa_cfg->sdt_state = GET_DUMP;
2638
2639         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2640                 if (ipr_fastfail)
2641                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2642                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2643         }
2644
2645         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2646         LEAVE;
2647 }
2648
2649 /**
2650  * ipr_find_ses_entry - Find matching SES in SES table
2651  * @res:        resource entry struct of SES
2652  *
2653  * Return value:
2654  *      pointer to SES table entry / NULL on failure
2655  **/
2656 static const struct ipr_ses_table_entry *
2657 ipr_find_ses_entry(struct ipr_resource_entry *res)
2658 {
2659         int i, j, matches;
2660         struct ipr_std_inq_vpids *vpids;
2661         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2662
2663         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2664                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2665                         if (ste->compare_product_id_byte[j] == 'X') {
2666                                 vpids = &res->std_inq_data.vpids;
2667                                 if (vpids->product_id[j] == ste->product_id[j])
2668                                         matches++;
2669                                 else
2670                                         break;
2671                         } else
2672                                 matches++;
2673                 }
2674
2675                 if (matches == IPR_PROD_ID_LEN)
2676                         return ste;
2677         }
2678
2679         return NULL;
2680 }
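
/*
 * Editor's note (illustrative, not from the driver source): in the
 * matching loop above, an 'X' in compare_product_id_byte[j] means
 * "byte j must equal the table's product_id[j]"; any other character
 * makes byte j a don't-care that matches automatically.  So a
 * hypothetical entry
 *
 *   { .product_id              = "HSBP05M P U2SCSI",
 *     .compare_product_id_byte = "XXXXXXX*XXXXXXXX", ... }
 *
 * matches any inquiry product ID that agrees with it everywhere
 * except position 7, which is ignored.
 */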
2681
2682 /**
2683  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2684  * @ioa_cfg:    ioa config struct
2685  * @bus:                SCSI bus
2686  * @bus_width:  bus width
2687  *
2688  * Return value:
2689  *      SCSI bus speed in units of 100KHz, e.g. 1600 is 160 MHz.
2690  *      For a 2-byte wide SCSI bus, the maximum transfer rate in
2691  *      MB/sec is twice the bus speed (e.g. a wide-enabled bus
2692  *      clocked at 160 MHz moves up to 320 MB/sec).
2693  **/
2694 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2695 {
2696         struct ipr_resource_entry *res;
2697         const struct ipr_ses_table_entry *ste;
2698         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2699
2700         /* Loop through each config table entry in the config table buffer */
2701         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2702                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2703                         continue;
2704
2705                 if (bus != res->bus)
2706                         continue;
2707
2708                 if (!(ste = ipr_find_ses_entry(res)))
2709                         continue;
2710
2711                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2712         }
2713
2714         return max_xfer_rate;
2715 }
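
/*
 * Worked example (table values hypothetical): with bus_width = 16 and
 * a matching SES entry whose max_bus_speed_limit is 80 MB/sec, the
 * assignment above gives
 *
 *   max_xfer_rate = (80 * 10) / (16 / 8) = 400
 *
 * i.e. 40 MHz in the 100KHz units this function returns, which on a
 * 2-byte wide bus is again 80 MB/sec.
 */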
2716
2717 /**
2718  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2719  * @ioa_cfg:            ioa config struct
2720  * @max_delay:          max delay in microseconds to wait
2721  *
2722  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2723  *
2724  * Return value:
2725  *      0 on success / other on failure
2726  **/
2727 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2728 {
2729         volatile u32 pcii_reg;
2730         int delay = 1;
2731
2732         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2733         while (delay < max_delay) {
2734                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2735
2736                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2737                         return 0;
2738
2739                 /* udelay cannot be used if delay is more than a few milliseconds */
2740                 if ((delay / 1000) > MAX_UDELAY_MS)
2741                         mdelay(delay / 1000);
2742                 else
2743                         udelay(delay);
2744
2745                 delay += delay;
2746         }
2747         return -EIO;
2748 }
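
/*
 * Editor's note: the delay doubles each pass (1, 2, 4, ... us), so the
 * total time slept before giving up is bounded by
 * 1 + 2 + ... + 2^k < 2 * max_delay microseconds, while only about
 * log2(max_delay) reads of the interrupt register are issued.
 */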
2749
2750 /**
2751  * ipr_get_sis64_dump_data_section - Dump IOA memory
2752  * @ioa_cfg:                    ioa config struct
2753  * @start_addr:                 adapter address to dump
2754  * @dest:                       destination kernel buffer
2755  * @length_in_words:            length to dump in 4 byte words
2756  *
2757  * Return value:
2758  *      0 on success
2759  **/
2760 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2761                                            u32 start_addr,
2762                                            __be32 *dest, u32 length_in_words)
2763 {
2764         int i;
2765
2766         for (i = 0; i < length_in_words; i++) {
2767                 writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
2768                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2769                 dest++;
2770         }
2771
2772         return 0;
2773 }
2774
2775 /**
2776  * ipr_get_ldump_data_section - Dump IOA memory
2777  * @ioa_cfg:                    ioa config struct
2778  * @start_addr:                 adapter address to dump
2779  * @dest:                       destination kernel buffer
2780  * @length_in_words:            length to dump in 4 byte words
2781  *
2782  * Return value:
2783  *      0 on success / -EIO on failure
2784  **/
2785 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2786                                       u32 start_addr,
2787                                       __be32 *dest, u32 length_in_words)
2788 {
2789         volatile u32 temp_pcii_reg;
2790         int i, delay = 0;
2791
2792         if (ioa_cfg->sis64)
2793                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2794                                                        dest, length_in_words);
2795
2796         /* Write IOA interrupt reg starting LDUMP state  */
2797         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2798                ioa_cfg->regs.set_uproc_interrupt_reg32);
2799
2800         /* Wait for IO debug acknowledge */
2801         if (ipr_wait_iodbg_ack(ioa_cfg,
2802                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2803                 dev_err(&ioa_cfg->pdev->dev,
2804                         "IOA dump long data transfer timeout\n");
2805                 return -EIO;
2806         }
2807
2808         /* Signal LDUMP interlocked - clear IO debug ack */
2809         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2810                ioa_cfg->regs.clr_interrupt_reg);
2811
2812         /* Write Mailbox with starting address */
2813         writel(start_addr, ioa_cfg->ioa_mailbox);
2814
2815         /* Signal address valid - clear IOA Reset alert */
2816         writel(IPR_UPROCI_RESET_ALERT,
2817                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2818
2819         for (i = 0; i < length_in_words; i++) {
2820                 /* Wait for IO debug acknowledge */
2821                 if (ipr_wait_iodbg_ack(ioa_cfg,
2822                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2823                         dev_err(&ioa_cfg->pdev->dev,
2824                                 "IOA dump short data transfer timeout\n");
2825                         return -EIO;
2826                 }
2827
2828                 /* Read data from mailbox and increment destination pointer */
2829                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2830                 dest++;
2831
2832                 /* For all but the last word of data, signal data received */
2833                 if (i < (length_in_words - 1)) {
2834                         /* Signal dump data received - Clear IO debug Ack */
2835                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2836                                ioa_cfg->regs.clr_interrupt_reg);
2837                 }
2838         }
2839
2840         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2841         writel(IPR_UPROCI_RESET_ALERT,
2842                ioa_cfg->regs.set_uproc_interrupt_reg32);
2843
2844         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2845                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2846
2847         /* Signal dump data received - Clear IO debug Ack */
2848         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2849                ioa_cfg->regs.clr_interrupt_reg);
2850
2851         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2852         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2853                 temp_pcii_reg =
2854                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2855
2856                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2857                         return 0;
2858
2859                 udelay(10);
2860                 delay += 10;
2861         }
2862
2863         return 0;
2864 }
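
/*
 * Editor's summary of the fmt2 LDUMP handshake implemented above
 * (paraphrased from the inline comments, which remain authoritative):
 *
 *   1. set RESET_ALERT | IO_DEBUG_ALERT to put the IOA in LDUMP state
 *   2. wait for IO_DEBUG_ACKNOWLEDGE, clear it, write the start
 *      address to the mailbox, then clear RESET_ALERT
 *   3. per 32-bit word: wait for the ACK, read the mailbox, and clear
 *      the ACK (except after the final word)
 *   4. raise RESET_ALERT, clear IO_DEBUG_ALERT and the ACK, then poll
 *      until the IOA drops RESET_ALERT to confirm LDUMP exit
 */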
2865
2866 #ifdef CONFIG_SCSI_IPR_DUMP
2867 /**
2868  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2869  * @ioa_cfg:            ioa config struct
2870  * @pci_address:        adapter address
2871  * @length:             length of data to copy
2872  *
2873  * Copy data from PCI adapter to kernel buffer.
2874  * Note: length MUST be a 4 byte multiple
2875  * Return value:
2876  *      0 on success / other on failure
2877  **/
2878 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2879                         unsigned long pci_address, u32 length)
2880 {
2881         int bytes_copied = 0;
2882         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2883         __be32 *page;
2884         unsigned long lock_flags = 0;
2885         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2886
2887         if (ioa_cfg->sis64)
2888                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2889         else
2890                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2891
2892         while (bytes_copied < length &&
2893                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2894                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2895                     ioa_dump->page_offset == 0) {
2896                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2897
2898                         if (!page) {
2899                                 ipr_trace;
2900                                 return bytes_copied;
2901                         }
2902
2903                         ioa_dump->page_offset = 0;
2904                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2905                         ioa_dump->next_page_index++;
2906                 } else
2907                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2908
2909                 rem_len = length - bytes_copied;
2910                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2911                 cur_len = min(rem_len, rem_page_len);
2912
2913                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2914                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2915                         rc = -EIO;
2916                 } else {
2917                         rc = ipr_get_ldump_data_section(ioa_cfg,
2918                                                         pci_address + bytes_copied,
2919                                                         &page[ioa_dump->page_offset / 4],
2920                                                         (cur_len / sizeof(u32)));
2921                 }
2922                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2923
2924                 if (!rc) {
2925                         ioa_dump->page_offset += cur_len;
2926                         bytes_copied += cur_len;
2927                 } else {
2928                         ipr_trace;
2929                         break;
2930                 }
2931                 schedule();
2932         }
2933
2934         return bytes_copied;
2935 }
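
/*
 * Worked example of the chunking above (4K pages assumed): copying
 * length = 10000 bytes starting from page_offset = 0 proceeds in
 * cur_len chunks of 4096, 4096 and 1808 bytes, allocating a fresh
 * page each time page_offset reaches PAGE_SIZE, re-taking the host
 * lock per chunk so an ABORT_DUMP request can cut the copy short, and
 * calling schedule() between chunks to avoid monopolizing the CPU.
 */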
2936
2937 /**
2938  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2939  * @hdr:        dump entry header struct
2940  *
2941  * Return value:
2942  *      nothing
2943  **/
2944 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2945 {
2946         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2947         hdr->num_elems = 1;
2948         hdr->offset = sizeof(*hdr);
2949         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2950 }
2951
2952 /**
2953  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2954  * @ioa_cfg:    ioa config struct
2955  * @driver_dump:        driver dump struct
2956  *
2957  * Return value:
2958  *      nothing
2959  **/
2960 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2961                                    struct ipr_driver_dump *driver_dump)
2962 {
2963         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2964
2965         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2966         driver_dump->ioa_type_entry.hdr.len =
2967                 sizeof(struct ipr_dump_ioa_type_entry) -
2968                 sizeof(struct ipr_dump_entry_header);
2969         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2970         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2971         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2972         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2973                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2974                 ucode_vpd->minor_release[1];
2975         driver_dump->hdr.num_entries++;
2976 }
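
/*
 * Editor's note (byte values hypothetical): fw_version above packs the
 * page 3 inquiry VPD into one 32-bit word.  For major_release = 0x02,
 * card_type = 0x58 and minor_release = { 0x41, 0x30 }:
 *
 *   (0x02 << 24) | (0x58 << 16) | (0x41 << 8) | 0x30 = 0x02584130
 */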
2977
2978 /**
2979  * ipr_dump_version_data - Fill in the driver version in the dump.
2980  * @ioa_cfg:    ioa config struct
2981  * @driver_dump:        driver dump struct
2982  *
2983  * Return value:
2984  *      nothing
2985  **/
2986 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2987                                   struct ipr_driver_dump *driver_dump)
2988 {
2989         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2990         driver_dump->version_entry.hdr.len =
2991                 sizeof(struct ipr_dump_version_entry) -
2992                 sizeof(struct ipr_dump_entry_header);
2993         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2994         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2995         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2996         driver_dump->hdr.num_entries++;
2997 }
2998
2999 /**
3000  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3001  * @ioa_cfg:    ioa config struct
3002  * @driver_dump:        driver dump struct
3003  *
3004  * Return value:
3005  *      nothing
3006  **/
3007 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3008                                    struct ipr_driver_dump *driver_dump)
3009 {
3010         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3011         driver_dump->trace_entry.hdr.len =
3012                 sizeof(struct ipr_dump_trace_entry) -
3013                 sizeof(struct ipr_dump_entry_header);
3014         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3015         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3016         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3017         driver_dump->hdr.num_entries++;
3018 }
3019
3020 /**
3021  * ipr_dump_location_data - Fill in the IOA location in the dump.
3022  * @ioa_cfg:    ioa config struct
3023  * @driver_dump:        driver dump struct
3024  *
3025  * Return value:
3026  *      nothing
3027  **/
3028 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3029                                    struct ipr_driver_dump *driver_dump)
3030 {
3031         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3032         driver_dump->location_entry.hdr.len =
3033                 sizeof(struct ipr_dump_location_entry) -
3034                 sizeof(struct ipr_dump_entry_header);
3035         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3036         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3037         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3038         driver_dump->hdr.num_entries++;
3039 }
3040
3041 /**
3042  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3043  * @ioa_cfg:    ioa config struct
3044  * @dump:               dump struct
3045  *
3046  * Return value:
3047  *      nothing
3048  **/
3049 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3050 {
3051         unsigned long start_addr, sdt_word;
3052         unsigned long lock_flags = 0;
3053         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3054         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3055         u32 num_entries, max_num_entries, start_off, end_off;
3056         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3057         struct ipr_sdt *sdt;
3058         int valid = 1;
3059         int i;
3060
3061         ENTER;
3062
3063         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3064
3065         if (ioa_cfg->sdt_state != READ_DUMP) {
3066                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067                 return;
3068         }
3069
3070         if (ioa_cfg->sis64) {
3071                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072                 ssleep(IPR_DUMP_DELAY_SECONDS);
3073                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3074         }
3075
3076         start_addr = readl(ioa_cfg->ioa_mailbox);
3077
3078         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3079                 dev_err(&ioa_cfg->pdev->dev,
3080                         "Invalid dump table format: %lx\n", start_addr);
3081                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3082                 return;
3083         }
3084
3085         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3086
3087         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3088
3089         /* Initialize the overall dump header */
3090         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3091         driver_dump->hdr.num_entries = 1;
3092         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3093         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3094         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3095         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3096
3097         ipr_dump_version_data(ioa_cfg, driver_dump);
3098         ipr_dump_location_data(ioa_cfg, driver_dump);
3099         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3100         ipr_dump_trace_data(ioa_cfg, driver_dump);
3101
3102         /* Update dump_header */
3103         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3104
3105         /* IOA Dump entry */
3106         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3107         ioa_dump->hdr.len = 0;
3108         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3109         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3110
3111         /* The first entries in the sdt are a list of dump addresses and
3112          * lengths used to gather the real dump data.  sdt points to the
3113          * IOA-generated dump table; dump data will be extracted based on
3114          * the entries in this table. */
3115         sdt = &ioa_dump->sdt;
3116
3117         if (ioa_cfg->sis64) {
3118                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3119                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3120         } else {
3121                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3122                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3123         }
3124
3125         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3126                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3127         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3128                                         bytes_to_copy / sizeof(__be32));
3129
3130         /* Smart Dump table is ready to use and the first entry is valid */
3131         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3132             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3133                 dev_err(&ioa_cfg->pdev->dev,
3134                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3135                         rc, be32_to_cpu(sdt->hdr.state));
3136                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3137                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3138                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139                 return;
3140         }
3141
3142         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3143
3144         if (num_entries > max_num_entries)
3145                 num_entries = max_num_entries;
3146
3147         /* Update dump length to the actual data to be copied */
3148         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3149         if (ioa_cfg->sis64)
3150                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3151         else
3152                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3153
3154         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3155
3156         for (i = 0; i < num_entries; i++) {
3157                 if (ioa_dump->hdr.len > max_dump_size) {
3158                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3159                         break;
3160                 }
3161
3162                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3163                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3164                         if (ioa_cfg->sis64)
3165                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3166                         else {
3167                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3168                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3169
3170                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3171                                         bytes_to_copy = end_off - start_off;
3172                                 else
3173                                         valid = 0;
3174                         }
3175                         if (valid) {
3176                                 if (bytes_to_copy > max_dump_size) {
3177                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3178                                         continue;
3179                                 }
3180
3181                                 /* Copy data from adapter to driver buffers */
3182                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3183                                                             bytes_to_copy);
3184
3185                                 ioa_dump->hdr.len += bytes_copied;
3186
3187                                 if (bytes_copied != bytes_to_copy) {
3188                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3189                                         break;
3190                                 }
3191                         }
3192                 }
3193         }
3194
3195         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3196
3197         /* Update dump_header */
3198         driver_dump->hdr.len += ioa_dump->hdr.len;
3199         wmb();
3200         ioa_cfg->sdt_state = DUMP_OBTAINED;
3201         LEAVE;
3202 }
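
/*
 * Editor's note: the dump above has two halves.  The driver-side
 * entries (version, location, adapter type, trace) come from host
 * memory; the IOA half is gathered indirectly: the mailbox supplies
 * the address of a Smart Dump Table whose entries are (address,
 * length) pairs, and each valid entry is copied with ipr_sdt_copy()
 * until max_dump_size would be exceeded.
 */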
3203
3204 #else
3205 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3206 #endif
3207
3208 /**
3209  * ipr_release_dump - Free adapter dump memory
3210  * @kref:       kref struct
3211  *
3212  * Return value:
3213  *      nothing
3214  **/
3215 static void ipr_release_dump(struct kref *kref)
3216 {
3217         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3218         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3219         unsigned long lock_flags = 0;
3220         int i;
3221
3222         ENTER;
3223         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224         ioa_cfg->dump = NULL;
3225         ioa_cfg->sdt_state = INACTIVE;
3226         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227
3228         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3229                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3230
3231         vfree(dump->ioa_dump.ioa_data);
3232         kfree(dump);
3233         LEAVE;
3234 }
3235
3236 /**
3237  * ipr_worker_thread - Worker thread
3238  * @work:               work struct
3239  *
3240  * Called at task level from a work thread. This function takes care
3241  * of adding and removing devices from the mid-layer as configuration
3242  * changes are detected by the adapter.
3243  *
3244  * Return value:
3245  *      nothing
3246  **/
3247 static void ipr_worker_thread(struct work_struct *work)
3248 {
3249         unsigned long lock_flags;
3250         struct ipr_resource_entry *res;
3251         struct scsi_device *sdev;
3252         struct ipr_dump *dump;
3253         struct ipr_ioa_cfg *ioa_cfg =
3254                 container_of(work, struct ipr_ioa_cfg, work_q);
3255         u8 bus, target, lun;
3256         int did_work;
3257
3258         ENTER;
3259         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3260
3261         if (ioa_cfg->sdt_state == READ_DUMP) {
3262                 dump = ioa_cfg->dump;
3263                 if (!dump) {
3264                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3265                         return;
3266                 }
3267                 kref_get(&dump->kref);
3268                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269                 ipr_get_ioa_dump(ioa_cfg, dump);
3270                 kref_put(&dump->kref, ipr_release_dump);
3271
3272                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3274                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3275                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276                 return;
3277         }
3278
3279 restart:
3280         do {
3281                 did_work = 0;
3282                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3283                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3284                         return;
3285                 }
3286
3287                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3288                         if (res->del_from_ml && res->sdev) {
3289                                 did_work = 1;
3290                                 sdev = res->sdev;
3291                                 if (!scsi_device_get(sdev)) {
3292                                         if (!res->add_to_ml)
3293                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3294                                         else
3295                                                 res->del_from_ml = 0;
3296                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297                                         scsi_remove_device(sdev);
3298                                         scsi_device_put(sdev);
3299                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300                                 }
3301                                 break;
3302                         }
3303                 }
3304         } while (did_work);
3305
3306         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3307                 if (res->add_to_ml) {
3308                         bus = res->bus;
3309                         target = res->target;
3310                         lun = res->lun;
3311                         res->add_to_ml = 0;
3312                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3314                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3315                         goto restart;
3316                 }
3317         }
3318
3319         ioa_cfg->scan_done = 1;
3320         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3321         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3322         LEAVE;
3323 }
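
/*
 * Editor's note on the locking pattern above: scsi_add_device() and
 * scsi_remove_device() can sleep, so the worker must drop the host
 * lock around them.  Because the lock is released, used_res_q may
 * change underneath the iteration; hence the did_work retry loop for
 * removals and the "goto restart" after each addition, both of which
 * re-walk the list from the head rather than trusting a stale
 * list_for_each_entry() cursor.
 */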
3324
3325 #ifdef CONFIG_SCSI_IPR_TRACE
3326 /**
3327  * ipr_read_trace - Dump the adapter trace
3328  * @filp:               open sysfs file
3329  * @kobj:               kobject struct
3330  * @bin_attr:           bin_attribute struct
3331  * @buf:                buffer
3332  * @off:                offset
3333  * @count:              buffer size
3334  *
3335  * Return value:
3336  *      number of bytes printed to buffer
3337  **/
3338 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3339                               struct bin_attribute *bin_attr,
3340                               char *buf, loff_t off, size_t count)
3341 {
3342         struct device *dev = container_of(kobj, struct device, kobj);
3343         struct Scsi_Host *shost = class_to_shost(dev);
3344         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3345         unsigned long lock_flags = 0;
3346         ssize_t ret;
3347
3348         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3349         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3350                                 IPR_TRACE_SIZE);
3351         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3352
3353         return ret;
3354 }
3355
3356 static struct bin_attribute ipr_trace_attr = {
3357         .attr = {
3358                 .name = "trace",
3359                 .mode = S_IRUGO,
3360         },
3361         .size = 0,
3362         .read = ipr_read_trace,
3363 };
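
/*
 * Usage note (host number hypothetical): the read-only binary
 * attribute above is exposed on the Scsi_Host class device, e.g.
 *
 *   hexdump -C /sys/class/scsi_host/host0/trace | head
 */
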
3364 #endif
3365
3366 /**
3367  * ipr_show_fw_version - Show the firmware version
3368  * @dev:        class device struct
3369  * @buf:        buffer
3370  *
3371  * Return value:
3372  *      number of bytes printed to buffer
3373  **/
3374 static ssize_t ipr_show_fw_version(struct device *dev,
3375                                    struct device_attribute *attr, char *buf)
3376 {
3377         struct Scsi_Host *shost = class_to_shost(dev);
3378         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3379         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3380         unsigned long lock_flags = 0;
3381         int len;
3382
3383         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3384         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3385                        ucode_vpd->major_release, ucode_vpd->card_type,
3386                        ucode_vpd->minor_release[0],
3387                        ucode_vpd->minor_release[1]);
3388         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3389         return len;
3390 }
3391
3392 static struct device_attribute ipr_fw_version_attr = {
3393         .attr = {
3394                 .name =         "fw_version",
3395                 .mode =         S_IRUGO,
3396         },
3397         .show = ipr_show_fw_version,
3398 };
3399
3400 /**
3401  * ipr_show_log_level - Show the adapter's error logging level
3402  * @dev:        class device struct
3403  * @buf:        buffer
3404  *
3405  * Return value:
3406  *      number of bytes printed to buffer
3407  **/
3408 static ssize_t ipr_show_log_level(struct device *dev,
3409                                    struct device_attribute *attr, char *buf)
3410 {
3411         struct Scsi_Host *shost = class_to_shost(dev);
3412         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3413         unsigned long lock_flags = 0;
3414         int len;
3415
3416         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3418         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3419         return len;
3420 }
3421
3422 /**
3423  * ipr_store_log_level - Change the adapter's error logging level
3424  * @dev:        class device struct
3425  * @buf:        buffer
3426  *
3427  * Return value:
3428  *      count on success / other on failure
3429  **/
3430 static ssize_t ipr_store_log_level(struct device *dev,
3431                                    struct device_attribute *attr,
3432                                    const char *buf, size_t count)
3433 {
3434         struct Scsi_Host *shost = class_to_shost(dev);
3435         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436         unsigned long lock_flags = 0;
3437
3438         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3439         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3440         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3441         return strlen(buf);
3442 }
3443
3444 static struct device_attribute ipr_log_level_attr = {
3445         .attr = {
3446                 .name =         "log_level",
3447                 .mode =         S_IRUGO | S_IWUSR,
3448         },
3449         .show = ipr_show_log_level,
3450         .store = ipr_store_log_level
3451 };
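
/*
 * Usage note (host number hypothetical; the value is a driver-specific
 * verbosity level):
 *
 *   cat /sys/class/scsi_host/host0/log_level
 *   echo 4 > /sys/class/scsi_host/host0/log_level
 */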
3452
3453 /**
3454  * ipr_store_diagnostics - IOA Diagnostics interface
3455  * @dev:        device struct
3456  * @buf:        buffer
3457  * @count:      buffer size
3458  *
3459  * This function will reset the adapter and wait a reasonable
3460  * amount of time for any errors that the adapter might log.
3461  *
3462  * Return value:
3463  *      count on success / other on failure
3464  **/
3465 static ssize_t ipr_store_diagnostics(struct device *dev,
3466                                      struct device_attribute *attr,
3467                                      const char *buf, size_t count)
3468 {
3469         struct Scsi_Host *shost = class_to_shost(dev);
3470         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471         unsigned long lock_flags = 0;
3472         int rc = count;
3473
3474         if (!capable(CAP_SYS_ADMIN))
3475                 return -EACCES;
3476
3477         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3478         while (ioa_cfg->in_reset_reload) {
3479                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3480                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3481                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3482         }
3483
3484         ioa_cfg->errors_logged = 0;
3485         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3486
3487         if (ioa_cfg->in_reset_reload) {
3488                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3490
3491                 /* Wait for a second for any errors to be logged */
3492                 msleep(1000);
3493         } else {
3494                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3495                 return -EIO;
3496         }
3497
3498         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3499         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3500                 rc = -EIO;
3501         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3502
3503         return rc;
3504 }
3505
3506 static struct device_attribute ipr_diagnostics_attr = {
3507         .attr = {
3508                 .name =         "run_diagnostics",
3509                 .mode =         S_IWUSR,
3510         },
3511         .store = ipr_store_diagnostics
3512 };
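
/*
 * Usage note (host number hypothetical): any write runs the test,
 * e.g.
 *
 *   echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 *
 * The write blocks across the adapter reset and fails with EIO if the
 * adapter logged any errors while resetting.
 */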
3513
3514 /**
3515  * ipr_show_adapter_state - Show the adapter's state
3516  * @dev:        device struct
3517  * @buf:        buffer
3518  *
3519  * Return value:
3520  *      number of bytes printed to buffer
3521  **/
3522 static ssize_t ipr_show_adapter_state(struct device *dev,
3523                                       struct device_attribute *attr, char *buf)
3524 {
3525         struct Scsi_Host *shost = class_to_shost(dev);
3526         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3527         unsigned long lock_flags = 0;
3528         int len;
3529
3530         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3531         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3532                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3533         else
3534                 len = snprintf(buf, PAGE_SIZE, "online\n");
3535         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3536         return len;
3537 }
3538
3539 /**
3540  * ipr_store_adapter_state - Change adapter state
3541  * @dev:        device struct
3542  * @buf:        buffer
3543  * @count:      buffer size
3544  *
3545  * This function will change the adapter's state.
3546  *
3547  * Return value:
3548  *      count on success / other on failure
3549  **/
3550 static ssize_t ipr_store_adapter_state(struct device *dev,
3551                                        struct device_attribute *attr,
3552                                        const char *buf, size_t count)
3553 {
3554         struct Scsi_Host *shost = class_to_shost(dev);
3555         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3556         unsigned long lock_flags;
3557         int result = count, i;
3558
3559         if (!capable(CAP_SYS_ADMIN))
3560                 return -EACCES;
3561
3562         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3563         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3564             !strncmp(buf, "online", 6)) {
3565                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3566                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3567                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3568                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3569                 }
3570                 wmb();
3571                 ioa_cfg->reset_retries = 0;
3572                 ioa_cfg->in_ioa_bringdown = 0;
3573                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3574         }
3575         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3577
3578         return result;
3579 }
3580
3581 static struct device_attribute ipr_ioa_state_attr = {
3582         .attr = {
3583                 .name =         "online_state",
3584                 .mode =         S_IRUGO | S_IWUSR,
3585         },
3586         .show = ipr_show_adapter_state,
3587         .store = ipr_store_adapter_state
3588 };
3589
3590 /**
3591  * ipr_store_reset_adapter - Reset the adapter
3592  * @dev:        device struct
3593  * @buf:        buffer
3594  * @count:      buffer size
3595  *
3596  * This function will reset the adapter.
3597  *
3598  * Return value:
3599  *      count on success / other on failure
3600  **/
3601 static ssize_t ipr_store_reset_adapter(struct device *dev,
3602                                        struct device_attribute *attr,
3603                                        const char *buf, size_t count)
3604 {
3605         struct Scsi_Host *shost = class_to_shost(dev);
3606         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3607         unsigned long lock_flags;
3608         int result = count;
3609
3610         if (!capable(CAP_SYS_ADMIN))
3611                 return -EACCES;
3612
3613         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3614         if (!ioa_cfg->in_reset_reload)
3615                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3616         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3617         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3618
3619         return result;
3620 }
3621
3622 static struct device_attribute ipr_ioa_reset_attr = {
3623         .attr = {
3624                 .name =         "reset_host",
3625                 .mode =         S_IWUSR,
3626         },
3627         .store = ipr_store_reset_adapter
3628 };
3629
3630 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3631 /**
3632  * ipr_show_iopoll_weight - Show ipr polling mode
3633  * @dev:        class device struct
3634  * @buf:        buffer
3635  *
3636  * Return value:
3637  *      number of bytes printed to buffer
3638  **/
3639 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3640                                    struct device_attribute *attr, char *buf)
3641 {
3642         struct Scsi_Host *shost = class_to_shost(dev);
3643         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3644         unsigned long lock_flags = 0;
3645         int len;
3646
3647         spin_lock_irqsave(shost->host_lock, lock_flags);
3648         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3649         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3650
3651         return len;
3652 }
3653
3654 /**
3655  * ipr_store_iopoll_weight - Change the adapter's polling mode
3656  * @dev:        class device struct
3657  * @buf:        buffer
3658  *
3659  * Return value:
3660  *      count on success / other on failure
3661  **/
3662 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3663                                         struct device_attribute *attr,
3664                                         const char *buf, size_t count)
3665 {
3666         struct Scsi_Host *shost = class_to_shost(dev);
3667         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3668         unsigned long user_iopoll_weight;
3669         unsigned long lock_flags = 0;
3670         int i;
3671
3672         if (!ioa_cfg->sis64) {
3673                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3674                 return -EINVAL;
3675         }
3676         if (kstrtoul(buf, 10, &user_iopoll_weight))
3677                 return -EINVAL;
3678
3679         if (user_iopoll_weight > 256) {
3680                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be 256 or less\n");
3681                 return -EINVAL;
3682         }
3683
3684         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3685                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged\n");
3686                 return strlen(buf);
3687         }
3688
3689         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3690                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3691                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3692         }
3693
3694         spin_lock_irqsave(shost->host_lock, lock_flags);
3695         ioa_cfg->iopoll_weight = user_iopoll_weight;
3696         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3697                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3698                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3699                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3700                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3701                 }
3702         }
3703         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3704
3705         return strlen(buf);
3706 }
3707
3708 static struct device_attribute ipr_iopoll_weight_attr = {
3709         .attr = {
3710                 .name =         "iopoll_weight",
3711                 .mode =         S_IRUGO | S_IWUSR,
3712         },
3713         .show = ipr_show_iopoll_weight,
3714         .store = ipr_store_iopoll_weight
3715 };
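
/*
 * Usage note (host number hypothetical): polling applies only to sis64
 * adapters using more than one HRRQ vector, and a weight of 0 disables
 * blk-iopoll again:
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */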
3716
3717 /**
3718  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3719  * @buf_len:            buffer length
3720  *
3721  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3722  * list to use for microcode download
3723  *
3724  * Return value:
3725  *      pointer to sglist / NULL on failure
3726  **/
3727 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3728 {
3729         int sg_size, order, bsize_elem, num_elem, i, j;
3730         struct ipr_sglist *sglist;
3731         struct scatterlist *scatterlist;
3732         struct page *page;
3733
3734         /* Get the minimum size per scatter/gather element */
3735         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3736
3737         /* Get the actual size per element */
3738         order = get_order(sg_size);
3739
3740         /* Determine the actual number of bytes per element */
3741         bsize_elem = PAGE_SIZE * (1 << order);
3742
3743         /* Determine the actual number of sg entries needed */
3744         if (buf_len % bsize_elem)
3745                 num_elem = (buf_len / bsize_elem) + 1;
3746         else
3747                 num_elem = buf_len / bsize_elem;
3748
3749         /* Allocate a scatter/gather list for the DMA */
3750         sglist = kzalloc(sizeof(struct ipr_sglist) +
3751                          (sizeof(struct scatterlist) * (num_elem - 1)),
3752                          GFP_KERNEL);
3753
3754         if (sglist == NULL) {
3755                 ipr_trace;
3756                 return NULL;
3757         }
3758
3759         scatterlist = sglist->scatterlist;
3760         sg_init_table(scatterlist, num_elem);
3761
3762         sglist->order = order;
3763         sglist->num_sg = num_elem;
3764
3765         /* Allocate a bunch of sg elements */
3766         for (i = 0; i < num_elem; i++) {
3767                 page = alloc_pages(GFP_KERNEL, order);
3768                 if (!page) {
3769                         ipr_trace;
3770
3771                         /* Free up what we already allocated */
3772                         for (j = i - 1; j >= 0; j--)
3773                                 __free_pages(sg_page(&scatterlist[j]), order);
3774                         kfree(sglist);
3775                         return NULL;
3776                 }
3777
3778                 sg_set_page(&scatterlist[i], page, 0, 0);
3779         }
3780
3781         return sglist;
3782 }
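
/*
 * Worked example of the sizing math above (IPR_MAX_SGLIST value
 * hypothetical): for buf_len = 1 MiB and IPR_MAX_SGLIST = 64,
 * sg_size = 1048576 / 63 = 16644 bytes, get_order(16644) = 3 with 4K
 * pages, so bsize_elem = 32768 and num_elem = 1048576 / 32768 = 32
 * scatter/gather entries, each backed by an order-3 page allocation.
 */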
3783
3784 /**
3785  * ipr_free_ucode_buffer - Frees a microcode download buffer
3786  * @sglist:             scatter/gather list pointer
3787  *
3788  * Free a DMA'able ucode download buffer previously allocated with
3789  * ipr_alloc_ucode_buffer
3790  *
3791  * Return value:
3792  *      nothing
3793  **/
3794 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3795 {
3796         int i;
3797
3798         for (i = 0; i < sglist->num_sg; i++)
3799                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3800
3801         kfree(sglist);
3802 }
3803
3804 /**
3805  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3806  * @sglist:             scatter/gather list pointer
3807  * @buffer:             buffer pointer
3808  * @len:                buffer length
3809  *
3810  * Copy a microcode image from a user buffer into a buffer allocated by
3811  * ipr_alloc_ucode_buffer
3812  *
3813  * Return value:
3814  *      0 on success / other on failure
3815  **/
3816 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3817                                  u8 *buffer, u32 len)
3818 {
3819         int bsize_elem, i, result = 0;
3820         struct scatterlist *scatterlist;
3821         void *kaddr;
3822
3823         /* Determine the actual number of bytes per element */
3824         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3825
3826         scatterlist = sglist->scatterlist;
3827
3828         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3829                 struct page *page = sg_page(&scatterlist[i]);
3830
3831                 kaddr = kmap(page);
3832                 memcpy(kaddr, buffer, bsize_elem);
3833                 kunmap(page);
3834
3835                 scatterlist[i].length = bsize_elem;
3841         }
3842
3843         if (len % bsize_elem) {
3844                 struct page *page = sg_page(&scatterlist[i]);
3845
3846                 kaddr = kmap(page);
3847                 memcpy(kaddr, buffer, len % bsize_elem);
3848                 kunmap(page);
3849
3850                 scatterlist[i].length = len % bsize_elem;
3851         }
3852
3853         sglist->buffer_len = len;
3854         return result;
3855 }
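
/*
 * Editor's note: the copy above walks the image in bsize_elem
 * (PAGE_SIZE << order) strides, mapping each scatterlist page only
 * for the duration of its memcpy.  A final partial stride, if any,
 * gets a short scatterlist length, so the IOADL built from this list
 * describes exactly buffer_len bytes.
 */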
3856
3857 /**
3858  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3859  * @ipr_cmd:            ipr command struct
3860  * @sglist:             scatter/gather list
3861  *
3862  * Builds a microcode download IOA data list (IOADL).
3863  *
3864  **/
3865 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3866                                     struct ipr_sglist *sglist)
3867 {
3868         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3869         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3870         struct scatterlist *scatterlist = sglist->scatterlist;
3871         int i;
3872
3873         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3874         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3875         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3876
3877         ioarcb->ioadl_len =
3878                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3879         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3880                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3881                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3882                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3883         }
3884
3885         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3886 }
3887
3888 /**
3889  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3890  * @ipr_cmd:    ipr command struct
3891  * @sglist:             scatter/gather list
3892  *
3893  * Builds a microcode download IOA data list (IOADL).
3894  *
3895  **/
3896 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3897                                   struct ipr_sglist *sglist)
3898 {
3899         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3900         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3901         struct scatterlist *scatterlist = sglist->scatterlist;
3902         int i;
3903
3904         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3905         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3906         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3907
3908         ioarcb->ioadl_len =
3909                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3910
3911         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3912                 ioadl[i].flags_and_data_len =
3913                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3914                 ioadl[i].address =
3915                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3916         }
3917
3918         ioadl[i-1].flags_and_data_len |=
3919                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3920 }
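
/*
 * Editor's note (element count hypothetical): both IOADL builders
 * above emit one descriptor per mapped scatter/gather element and tag
 * only the final one with IPR_IOADL_FLAGS_LAST, so a 3-element list
 * becomes
 *
 *   { WRITE, len0, addr0 }, { WRITE, len1, addr1 },
 *   { WRITE | LAST, len2, addr2 }
 */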
3921
3922 /**
3923  * ipr_update_ioa_ucode - Update IOA's microcode
3924  * @ioa_cfg:    ioa config struct
3925  * @sglist:             scatter/gather list
3926  *
3927  * Initiate an adapter reset to update the IOA's microcode
3928  *
3929  * Return value:
3930  *      0 on success / -EIO on failure
3931  **/
3932 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3933                                 struct ipr_sglist *sglist)
3934 {
3935         unsigned long lock_flags;
3936
3937         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3938         while (ioa_cfg->in_reset_reload) {
3939                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3941                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3942         }
3943
3944         if (ioa_cfg->ucode_sglist) {
3945                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3946                 dev_err(&ioa_cfg->pdev->dev,
3947                         "Microcode download already in progress\n");
3948                 return -EIO;
3949         }
3950
3951         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3952                                         sglist->scatterlist, sglist->num_sg,
3953                                         DMA_TO_DEVICE);
3954
3955         if (!sglist->num_dma_sg) {
3956                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957                 dev_err(&ioa_cfg->pdev->dev,
3958                         "Failed to map microcode download buffer!\n");
3959                 return -EIO;
3960         }
3961
3962         ioa_cfg->ucode_sglist = sglist;
3963         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3964         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3966
3967         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3968         ioa_cfg->ucode_sglist = NULL;
3969         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3970         return 0;
3971 }
3972
3973 /**
3974  * ipr_store_update_fw - Update the firmware on the adapter
3975  * @dev:        device struct
3976  * @buf:        buffer
3977  * @count:      buffer size
3978  *
3979  * This function will update the firmware on the adapter.
3980  *
3981  * Return value:
3982  *      count on success / other on failure
3983  **/
3984 static ssize_t ipr_store_update_fw(struct device *dev,
3985                                    struct device_attribute *attr,
3986                                    const char *buf, size_t count)
3987 {
3988         struct Scsi_Host *shost = class_to_shost(dev);
3989         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3990         struct ipr_ucode_image_header *image_hdr;
3991         const struct firmware *fw_entry;
3992         struct ipr_sglist *sglist;
3993         char fname[100];
3994         u8 *src;
3995         int len, result, dnld_size;
3996
3997         if (!capable(CAP_SYS_ADMIN))
3998                 return -EACCES;
3999
4000         len = scnprintf(fname, sizeof(fname), "%s", buf);
4001         if (len && fname[len - 1] == '\n')
4002                 fname[len - 1] = '\0';
4002
4003         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4004                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4005                 return -EIO;
4006         }
4007
4008         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4009
4010         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4011         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4012         sglist = ipr_alloc_ucode_buffer(dnld_size);
4013
4014         if (!sglist) {
4015                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4016                 release_firmware(fw_entry);
4017                 return -ENOMEM;
4018         }
4019
4020         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4021
4022         if (result) {
4023                 dev_err(&ioa_cfg->pdev->dev,
4024                         "Microcode buffer copy to DMA buffer failed\n");
4025                 goto out;
4026         }
4027
4028         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4029
4030         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4031
4032         if (!result)
4033                 result = count;
4034 out:
4035         ipr_free_ucode_buffer(sglist);
4036         release_firmware(fw_entry);
4037         return result;
4038 }
4039
4040 static struct device_attribute ipr_update_fw_attr = {
4041         .attr = {
4042                 .name =         "update_fw",
4043                 .mode =         S_IWUSR,
4044         },
4045         .store = ipr_store_update_fw
4046 };
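
/*
 * Usage note (host number and image name hypothetical): with the
 * microcode image installed in the firmware search path,
 *
 *   echo 534953.30707034 > /sys/class/scsi_host/host0/update_fw
 *
 * triggers request_firmware() followed by a normal-shutdown adapter
 * reset that flashes the image.
 */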
4047
4048 /**
4049  * ipr_show_fw_type - Show the adapter's firmware type.
4050  * @dev:        class device struct
4051  * @buf:        buffer
4052  *
4053  * Return value:
4054  *      number of bytes printed to buffer
4055  **/
4056 static ssize_t ipr_show_fw_type(struct device *dev,
4057                                 struct device_attribute *attr, char *buf)
4058 {
4059         struct Scsi_Host *shost = class_to_shost(dev);
4060         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4061         unsigned long lock_flags = 0;
4062         int len;
4063
4064         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4065         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4066         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4067         return len;
4068 }
4069
4070 static struct device_attribute ipr_ioa_fw_type_attr = {
4071         .attr = {
4072                 .name =         "fw_type",
4073                 .mode =         S_IRUGO,
4074         },
4075         .show = ipr_show_fw_type
4076 };
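
/*
 * Example (illustrative; "host0" is an assumption):
 *
 *   # cat /sys/class/scsi_host/host0/fw_type
 *   1
 *
 * 1 indicates a 64-bit SIS (sis64) adapter, 0 a legacy 32-bit SIS one.
 */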
4077
4078 static struct device_attribute *ipr_ioa_attrs[] = {
4079         &ipr_fw_version_attr,
4080         &ipr_log_level_attr,
4081         &ipr_diagnostics_attr,
4082         &ipr_ioa_state_attr,
4083         &ipr_ioa_reset_attr,
4084         &ipr_update_fw_attr,
4085         &ipr_ioa_fw_type_attr,
4086         &ipr_iopoll_weight_attr,
4087         NULL,
4088 };
4089
4090 #ifdef CONFIG_SCSI_IPR_DUMP
4091 /**
4092  * ipr_read_dump - Dump the adapter
4093  * @filp:               open sysfs file
4094  * @kobj:               kobject struct
4095  * @bin_attr:           bin_attribute struct
4096  * @buf:                buffer
4097  * @off:                offset
4098  * @count:              buffer size
4099  *
4100  * Return value:
4101  *      number of bytes printed to buffer
4102  **/
4103 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4104                              struct bin_attribute *bin_attr,
4105                              char *buf, loff_t off, size_t count)
4106 {
4107         struct device *cdev = container_of(kobj, struct device, kobj);
4108         struct Scsi_Host *shost = class_to_shost(cdev);
4109         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4110         struct ipr_dump *dump;
4111         unsigned long lock_flags = 0;
4112         char *src;
4113         int len, sdt_end;
4114         size_t rc = count;
4115
4116         if (!capable(CAP_SYS_ADMIN))
4117                 return -EACCES;
4118
4119         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4120         dump = ioa_cfg->dump;
4121
4122         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4123                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4124                 return 0;
4125         }
4126         kref_get(&dump->kref);
4127         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128
4129         if (off > dump->driver_dump.hdr.len) {
4130                 kref_put(&dump->kref, ipr_release_dump);
4131                 return 0;
4132         }
4133
4134         if (off + count > dump->driver_dump.hdr.len) {
4135                 count = dump->driver_dump.hdr.len - off;
4136                 rc = count;
4137         }
4138
4139         if (count && off < sizeof(dump->driver_dump)) {
4140                 if (off + count > sizeof(dump->driver_dump))
4141                         len = sizeof(dump->driver_dump) - off;
4142                 else
4143                         len = count;
4144                 src = (u8 *)&dump->driver_dump + off;
4145                 memcpy(buf, src, len);
4146                 buf += len;
4147                 off += len;
4148                 count -= len;
4149         }
4150
4151         off -= sizeof(dump->driver_dump);
4152
4153         if (ioa_cfg->sis64)
4154                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4155                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4156                            sizeof(struct ipr_sdt_entry));
4157         else
4158                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4159                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4160
4161         if (count && off < sdt_end) {
4162                 if (off + count > sdt_end)
4163                         len = sdt_end - off;
4164                 else
4165                         len = count;
4166                 src = (u8 *)&dump->ioa_dump + off;
4167                 memcpy(buf, src, len);
4168                 buf += len;
4169                 off += len;
4170                 count -= len;
4171         }
4172
4173         off -= sdt_end;
4174
4175         while (count) {
4176                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4177                         len = PAGE_ALIGN(off) - off;
4178                 else
4179                         len = count;
4180                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4181                 src += off & ~PAGE_MASK;
4182                 memcpy(buf, src, len);
4183                 buf += len;
4184                 off += len;
4185                 count -= len;
4186         }
4187
4188         kref_put(&dump->kref, ipr_release_dump);
4189         return rc;
4190 }
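
/*
 * A minimal userspace sketch of draining this binary attribute
 * (illustrative only; the sysfs path is an assumption):
 *
 *   int fd = open("/sys/class/scsi_host/host0/dump", O_RDONLY);
 *   char page[4096];
 *   ssize_t n;
 *
 *   while ((n = read(fd, page, sizeof(page))) > 0)
 *           write(STDOUT_FILENO, page, n);
 *
 * The offset arithmetic above serves the driver dump header first, then
 * the SDT table, then the captured IOA pages, and returns 0 (EOF) once
 * off passes driver_dump.hdr.len.
 */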
4191
4192 /**
4193  * ipr_alloc_dump - Prepare for adapter dump
4194  * @ioa_cfg:    ioa config struct
4195  *
4196  * Return value:
4197  *      0 on success / other on failure
4198  **/
4199 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4200 {
4201         struct ipr_dump *dump;
4202         __be32 **ioa_data;
4203         unsigned long lock_flags = 0;
4204
4205         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4206
4207         if (!dump) {
4208                 ipr_err("Dump memory allocation failed\n");
4209                 return -ENOMEM;
4210         }
4211
4212         if (ioa_cfg->sis64)
4213                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4214         else
4215                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4216
4217         if (!ioa_data) {
4218                 ipr_err("Dump memory allocation failed\n");
4219                 kfree(dump);
4220                 return -ENOMEM;
4221         }
4222
4223         dump->ioa_dump.ioa_data = ioa_data;
4224
4225         kref_init(&dump->kref);
4226         dump->ioa_cfg = ioa_cfg;
4227
4228         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4229
4230         if (INACTIVE != ioa_cfg->sdt_state) {
4231                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4232                 vfree(dump->ioa_dump.ioa_data);
4233                 kfree(dump);
4234                 return 0;
4235         }
4236
4237         ioa_cfg->dump = dump;
4238         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4239         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4240                 ioa_cfg->dump_taken = 1;
4241                 schedule_work(&ioa_cfg->work_q);
4242         }
4243         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4244
4245         return 0;
4246 }
4247
4248 /**
4249  * ipr_free_dump - Free adapter dump memory
4250  * @ioa_cfg:    ioa config struct
4251  *
4252  * Return value:
4253  *      0 on success / other on failure
4254  **/
4255 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4256 {
4257         struct ipr_dump *dump;
4258         unsigned long lock_flags = 0;
4259
4260         ENTER;
4261
4262         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4263         dump = ioa_cfg->dump;
4264         if (!dump) {
4265                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4266                 return 0;
4267         }
4268
4269         ioa_cfg->dump = NULL;
4270         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4271
4272         kref_put(&dump->kref, ipr_release_dump);
4273
4274         LEAVE;
4275         return 0;
4276 }
4277
4278 /**
4279  * ipr_write_dump - Setup dump state of adapter
4280  * @filp:               open sysfs file
4281  * @kobj:               kobject struct
4282  * @bin_attr:           bin_attribute struct
4283  * @buf:                buffer
4284  * @off:                offset
4285  * @count:              buffer size
4286  *
4287  * Return value:
4288  *      count on success / negative errno on failure
4289  **/
4290 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4291                               struct bin_attribute *bin_attr,
4292                               char *buf, loff_t off, size_t count)
4293 {
4294         struct device *cdev = container_of(kobj, struct device, kobj);
4295         struct Scsi_Host *shost = class_to_shost(cdev);
4296         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4297         int rc;
4298
4299         if (!capable(CAP_SYS_ADMIN))
4300                 return -EACCES;
4301
4302         if (buf[0] == '1')
4303                 rc = ipr_alloc_dump(ioa_cfg);
4304         else if (buf[0] == '0')
4305                 rc = ipr_free_dump(ioa_cfg);
4306         else
4307                 return -EINVAL;
4308
4309         if (rc)
4310                 return rc;
4311         else
4312                 return count;
4313 }
4314
4315 static struct bin_attribute ipr_dump_attr = {
4316         .attr = {
4317                 .name = "dump",
4318                 .mode = S_IRUSR | S_IWUSR,
4319         },
4320         .size = 0,
4321         .read = ipr_read_dump,
4322         .write = ipr_write_dump
4323 };
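
/*
 * Typical dump workflow (illustrative; "host0" is an assumption):
 *
 *   # echo 1 > /sys/class/scsi_host/host0/dump      (set up dump buffers)
 *   ... wait for the adapter dump to be collected ...
 *   # cat /sys/class/scsi_host/host0/dump > ioa.dump
 *   # echo 0 > /sys/class/scsi_host/host0/dump      (free dump buffers)
 *
 * ipr_read_dump() returns data only once sdt_state reaches DUMP_OBTAINED.
 */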
4324 #else
4325 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4326 #endif
4327
4328 /**
4329  * ipr_change_queue_depth - Change the device's queue depth
4330  * @sdev:       scsi device struct
4331  * @qdepth:     depth to set
4333  *
4334  * Return value:
4335  *      actual depth set
4336  **/
4337 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4338 {
4339         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4340         struct ipr_resource_entry *res;
4341         unsigned long lock_flags = 0;
4342
4343         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4344         res = (struct ipr_resource_entry *)sdev->hostdata;
4345
4346         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4347                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4348         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4349
4350         scsi_change_queue_depth(sdev, qdepth);
4351         return sdev->queue_depth;
4352 }
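
/*
 * Example (illustrative; "sdb" is assumed to be a SATA device behind
 * this adapter): the SCSI midlayer routes writes to the standard
 * queue_depth attribute through this callback, so a request above
 * IPR_MAX_CMD_PER_ATA_LUN is silently clamped for GATA resources:
 *
 *   # echo 64 > /sys/block/sdb/device/queue_depth
 *   # cat /sys/block/sdb/device/queue_depth
 */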
4353
4354 /**
4355  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4356  * @dev:        device struct
4357  * @attr:       device attribute structure
4358  * @buf:        buffer
4359  *
4360  * Return value:
4361  *      number of bytes printed to buffer
4362  **/
4363 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4364 {
4365         struct scsi_device *sdev = to_scsi_device(dev);
4366         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4367         struct ipr_resource_entry *res;
4368         unsigned long lock_flags = 0;
4369         ssize_t len = -ENXIO;
4370
4371         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4372         res = (struct ipr_resource_entry *)sdev->hostdata;
4373         if (res)
4374                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4375         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4376         return len;
4377 }
4378
4379 static struct device_attribute ipr_adapter_handle_attr = {
4380         .attr = {
4381                 .name =         "adapter_handle",
4382                 .mode =         S_IRUSR,
4383         },
4384         .show = ipr_show_adapter_handle
4385 };
4386
4387 /**
4388  * ipr_show_resource_path - Show the resource path or the resource address for
4389  *                          this device.
4390  * @dev:        device struct
4391  * @attr:       device attribute structure
4392  * @buf:        buffer
4393  *
4394  * Return value:
4395  *      number of bytes printed to buffer
4396  **/
4397 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4398 {
4399         struct scsi_device *sdev = to_scsi_device(dev);
4400         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4401         struct ipr_resource_entry *res;
4402         unsigned long lock_flags = 0;
4403         ssize_t len = -ENXIO;
4404         char buffer[IPR_MAX_RES_PATH_LENGTH];
4405
4406         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4407         res = (struct ipr_resource_entry *)sdev->hostdata;
4408         if (res && ioa_cfg->sis64)
4409                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4410                                __ipr_format_res_path(res->res_path, buffer,
4411                                                      sizeof(buffer)));
4412         else if (res)
4413                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4414                                res->bus, res->target, res->lun);
4415
4416         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417         return len;
4418 }
4419
4420 static struct device_attribute ipr_resource_path_attr = {
4421         .attr = {
4422                 .name =         "resource_path",
4423                 .mode =         S_IRUGO,
4424         },
4425         .show = ipr_show_resource_path
4426 };
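
/*
 * Example output (illustrative values): on a sis64 adapter this prints
 * the dash-separated hex fabric path reported by the IOA, e.g.
 * "00-0C-01", while on older adapters it falls back to the SCSI nexus,
 * e.g. "2:0:4:0".
 */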
4427
4428 /**
4429  * ipr_show_device_id - Show the device_id for this device.
4430  * @dev:        device struct
4431  * @attr:       device attribute structure
4432  * @buf:        buffer
4433  *
4434  * Return value:
4435  *      number of bytes printed to buffer
4436  **/
4437 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4438 {
4439         struct scsi_device *sdev = to_scsi_device(dev);
4440         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4441         struct ipr_resource_entry *res;
4442         unsigned long lock_flags = 0;
4443         ssize_t len = -ENXIO;
4444
4445         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4446         res = (struct ipr_resource_entry *)sdev->hostdata;
4447         if (res && ioa_cfg->sis64)
4448                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4449         else if (res)
4450                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4451
4452         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4453         return len;
4454 }
4455
4456 static struct device_attribute ipr_device_id_attr = {
4457         .attr = {
4458                 .name =         "device_id",
4459                 .mode =         S_IRUGO,
4460         },
4461         .show = ipr_show_device_id
4462 };
4463
4464 /**
4465  * ipr_show_resource_type - Show the resource type for this device.
4466  * @dev:        device struct
4467  * @attr:       device attribute structure
4468  * @buf:        buffer
4469  *
4470  * Return value:
4471  *      number of bytes printed to buffer
4472  **/
4473 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4474 {
4475         struct scsi_device *sdev = to_scsi_device(dev);
4476         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4477         struct ipr_resource_entry *res;
4478         unsigned long lock_flags = 0;
4479         ssize_t len = -ENXIO;
4480
4481         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4482         res = (struct ipr_resource_entry *)sdev->hostdata;
4483
4484         if (res)
4485                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4486
4487         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4488         return len;
4489 }
4490
4491 static struct device_attribute ipr_resource_type_attr = {
4492         .attr = {
4493                 .name =         "resource_type",
4494                 .mode =         S_IRUGO,
4495         },
4496         .show = ipr_show_resource_type
4497 };
4498
4499 static struct device_attribute *ipr_dev_attrs[] = {
4500         &ipr_adapter_handle_attr,
4501         &ipr_resource_path_attr,
4502         &ipr_device_id_attr,
4503         &ipr_resource_type_attr,
4504         NULL,
4505 };
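
/*
 * The attributes above surface under each SCSI device's sysfs directory
 * (the H:C:T:L address and handle value below are assumptions):
 *
 *   # ls /sys/bus/scsi/devices/2:0:4:0/
 *   adapter_handle  device_id  resource_path  resource_type  ...
 *   # cat /sys/bus/scsi/devices/2:0:4:0/adapter_handle
 *   00000002
 */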
4506
4507 /**
4508  * ipr_biosparam - Return the HSC mapping
4509  * @sdev:                       scsi device struct
4510  * @block_device:       block device pointer
4511  * @capacity:           capacity of the device
4512  * @parm:                       Array containing returned HSC values.
4513  *
4514  * This function generates the HSC parms that fdisk uses.
4515  * We want to make sure we return something that places partitions
4516  * on 4k boundaries for best performance with the IOA.
4517  *
4518  * Return value:
4519  *      0 on success
4520  **/
4521 static int ipr_biosparam(struct scsi_device *sdev,
4522                          struct block_device *block_device,
4523                          sector_t capacity, int *parm)
4524 {
4525         int heads, sectors;
4526         sector_t cylinders;
4527
4528         heads = 128;
4529         sectors = 32;
4530
4531         cylinders = capacity;
4532         sector_div(cylinders, (128 * 32));
4533
4534         /* return result */
4535         parm[0] = heads;
4536         parm[1] = sectors;
4537         parm[2] = cylinders;
4538
4539         return 0;
4540 }
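
/*
 * Worked example of the fixed 128/32 geometry: 128 heads * 32 sectors
 * = 4096 sectors (2 MiB) per cylinder, so an illustrative disk of
 * 143374000 512-byte sectors (~73 GB) reports 143374000 / 4096 = 35003
 * cylinders. Partitioning tools that align to cylinder boundaries
 * therefore land on 4k boundaries, which is what the IOA prefers.
 */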
4541
4542 /**
4543  * ipr_find_starget - Find target based on bus/target.
4544  * @starget:    scsi target struct
4545  *
4546  * Return value:
4547  *      resource entry pointer if found / NULL if not found
4548  **/
4549 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4550 {
4551         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4552         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4553         struct ipr_resource_entry *res;
4554
4555         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4556                 if ((res->bus == starget->channel) &&
4557                     (res->target == starget->id)) {
4558                         return res;
4559                 }
4560         }
4561
4562         return NULL;
4563 }
4564
4565 static struct ata_port_info sata_port_info;
4566
4567 /**
4568  * ipr_target_alloc - Prepare for commands to a SCSI target
4569  * @starget:    scsi target struct
4570  *
4571  * If the device is a SATA device, this function allocates an
4572  * ATA port with libata, else it does nothing.
4573  *
4574  * Return value:
4575  *      0 on success / non-0 on failure
4576  **/
4577 static int ipr_target_alloc(struct scsi_target *starget)
4578 {
4579         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4580         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4581         struct ipr_sata_port *sata_port;
4582         struct ata_port *ap;
4583         struct ipr_resource_entry *res;
4584         unsigned long lock_flags;
4585
4586         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587         res = ipr_find_starget(starget);
4588         starget->hostdata = NULL;
4589
4590         if (res && ipr_is_gata(res)) {
4591                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4592                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4593                 if (!sata_port)
4594                         return -ENOMEM;
4595
4596                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4597                 if (ap) {
4598                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4599                         sata_port->ioa_cfg = ioa_cfg;
4600                         sata_port->ap = ap;
4601                         sata_port->res = res;
4602
4603                         res->sata_port = sata_port;
4604                         ap->private_data = sata_port;
4605                         starget->hostdata = sata_port;
4606                 } else {
4607                         kfree(sata_port);
4608                         return -ENOMEM;
4609                 }
4610         }
4611         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4612
4613         return 0;
4614 }
4615
4616 /**
4617  * ipr_target_destroy - Destroy a SCSI target
4618  * @starget:    scsi target struct
4619  *
4620  * If the device was a SATA device, this function frees the libata
4621  * ATA port, else it does nothing.
4622  *
4623  **/
4624 static void ipr_target_destroy(struct scsi_target *starget)
4625 {
4626         struct ipr_sata_port *sata_port = starget->hostdata;
4627         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4628         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4629
4630         if (ioa_cfg->sis64) {
4631                 if (!ipr_find_starget(starget)) {
4632                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4633                                 clear_bit(starget->id, ioa_cfg->array_ids);
4634                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4635                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4636                         else if (starget->channel == 0)
4637                                 clear_bit(starget->id, ioa_cfg->target_ids);
4638                 }
4639         }
4640
4641         if (sata_port) {
4642                 starget->hostdata = NULL;
4643                 ata_sas_port_destroy(sata_port->ap);
4644                 kfree(sata_port);
4645         }
4646 }
4647
4648 /**
4649  * ipr_find_sdev - Find device based on bus/target/lun.
4650  * @sdev:       scsi device struct
4651  *
4652  * Return value:
4653  *      resource entry pointer if found / NULL if not found
4654  **/
4655 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4656 {
4657         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4658         struct ipr_resource_entry *res;
4659
4660         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4661                 if ((res->bus == sdev->channel) &&
4662                     (res->target == sdev->id) &&
4663                     (res->lun == sdev->lun))
4664                         return res;
4665         }
4666
4667         return NULL;
4668 }
4669
4670 /**
4671  * ipr_slave_destroy - Unconfigure a SCSI device
4672  * @sdev:       scsi device struct
4673  *
4674  * Return value:
4675  *      nothing
4676  **/
4677 static void ipr_slave_destroy(struct scsi_device *sdev)
4678 {
4679         struct ipr_resource_entry *res;
4680         struct ipr_ioa_cfg *ioa_cfg;
4681         unsigned long lock_flags = 0;
4682
4683         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4684
4685         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686         res = (struct ipr_resource_entry *) sdev->hostdata;
4687         if (res) {
4688                 if (res->sata_port)
4689                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4690                 sdev->hostdata = NULL;
4691                 res->sdev = NULL;
4692                 res->sata_port = NULL;
4693         }
4694         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4695 }
4696
4697 /**
4698  * ipr_slave_configure - Configure a SCSI device
4699  * @sdev:       scsi device struct
4700  *
4701  * This function configures the specified scsi device.
4702  *
4703  * Return value:
4704  *      0 on success
4705  **/
4706 static int ipr_slave_configure(struct scsi_device *sdev)
4707 {
4708         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4709         struct ipr_resource_entry *res;
4710         struct ata_port *ap = NULL;
4711         unsigned long lock_flags = 0;
4712         char buffer[IPR_MAX_RES_PATH_LENGTH];
4713
4714         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4715         res = sdev->hostdata;
4716         if (res) {
4717                 if (ipr_is_af_dasd_device(res))
4718                         sdev->type = TYPE_RAID;
4719                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4720                         sdev->scsi_level = 4;
4721                         sdev->no_uld_attach = 1;
4722                 }
4723                 if (ipr_is_vset_device(res)) {
4724                         sdev->scsi_level = SCSI_SPC_3;
4725                         blk_queue_rq_timeout(sdev->request_queue,
4726                                              IPR_VSET_RW_TIMEOUT);
4727                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4728                 }
4729                 if (ipr_is_gata(res) && res->sata_port)
4730                         ap = res->sata_port->ap;
4731                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4732
4733                 if (ap) {
4734                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4735                         ata_sas_slave_configure(sdev, ap);
4736                 }
4737
4738                 if (ioa_cfg->sis64)
4739                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4740                                     ipr_format_res_path(ioa_cfg,
4741                                                         res->res_path, buffer, sizeof(buffer)));
4742                 return 0;
4743         }
4744         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4745         return 0;
4746 }
4747
4748 /**
4749  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4750  * @sdev:       scsi device struct
4751  *
4752  * This function initializes an ATA port so that future commands
4753  * sent through queuecommand will work.
4754  *
4755  * Return value:
4756  *      0 on success
4757  **/
4758 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4759 {
4760         struct ipr_sata_port *sata_port = NULL;
4761         int rc = -ENXIO;
4762
4763         ENTER;
4764         if (sdev->sdev_target)
4765                 sata_port = sdev->sdev_target->hostdata;
4766         if (sata_port) {
4767                 rc = ata_sas_port_init(sata_port->ap);
4768                 if (rc == 0)
4769                         rc = ata_sas_sync_probe(sata_port->ap);
4770         }
4771
4772         if (rc)
4773                 ipr_slave_destroy(sdev);
4774
4775         LEAVE;
4776         return rc;
4777 }
4778
4779 /**
4780  * ipr_slave_alloc - Prepare for commands to a device.
4781  * @sdev:       scsi device struct
4782  *
4783  * This function saves a pointer to the resource entry
4784  * in the scsi device struct if the device exists. We
4785  * can then use this pointer in ipr_queuecommand when
4786  * handling new commands.
4787  *
4788  * Return value:
4789  *      0 on success / -ENXIO if device does not exist
4790  **/
4791 static int ipr_slave_alloc(struct scsi_device *sdev)
4792 {
4793         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4794         struct ipr_resource_entry *res;
4795         unsigned long lock_flags;
4796         int rc = -ENXIO;
4797
4798         sdev->hostdata = NULL;
4799
4800         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4801
4802         res = ipr_find_sdev(sdev);
4803         if (res) {
4804                 res->sdev = sdev;
4805                 res->add_to_ml = 0;
4806                 res->in_erp = 0;
4807                 sdev->hostdata = res;
4808                 if (!ipr_is_naca_model(res))
4809                         res->needs_sync_complete = 1;
4810                 rc = 0;
4811                 if (ipr_is_gata(res)) {
4812                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4813                         return ipr_ata_slave_alloc(sdev);
4814                 }
4815         }
4816
4817         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4818
4819         return rc;
4820 }
4821
4822 /**
4823  * ipr_match_lun - Match function for specified LUN
4824  * @ipr_cmd:    ipr command struct
4825  * @device:             device to match (sdev)
4826  *
4827  * Returns:
4828  *      1 if command matches sdev / 0 if command does not match sdev
4829  **/
4830 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4831 {
4832         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4833                 return 1;
4834         return 0;
4835 }
4836
4837 /**
4838  * ipr_wait_for_ops - Wait for matching commands to complete
4839  * @ioa_cfg:    ioa config struct
4840  * @device:             device to match (sdev)
4841  * @match:              match function to use
4842  *
4843  * Returns:
4844  *      SUCCESS / FAILED
4845  **/
4846 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4847                             int (*match)(struct ipr_cmnd *, void *))
4848 {
4849         struct ipr_cmnd *ipr_cmd;
4850         int wait;
4851         unsigned long flags;
4852         struct ipr_hrr_queue *hrrq;
4853         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4854         DECLARE_COMPLETION_ONSTACK(comp);
4855
4856         ENTER;
4857         do {
4858                 wait = 0;
4859
4860                 for_each_hrrq(hrrq, ioa_cfg) {
4861                         spin_lock_irqsave(hrrq->lock, flags);
4862                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4863                                 if (match(ipr_cmd, device)) {
4864                                         ipr_cmd->eh_comp = &comp;
4865                                         wait++;
4866                                 }
4867                         }
4868                         spin_unlock_irqrestore(hrrq->lock, flags);
4869                 }
4870
4871                 if (wait) {
4872                         timeout = wait_for_completion_timeout(&comp, timeout);
4873
4874                         if (!timeout) {
4875                                 wait = 0;
4876
4877                                 for_each_hrrq(hrrq, ioa_cfg) {
4878                                         spin_lock_irqsave(hrrq->lock, flags);
4879                                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4880                                                 if (match(ipr_cmd, device)) {
4881                                                         ipr_cmd->eh_comp = NULL;
4882                                                         wait++;
4883                                                 }
4884                                         }
4885                                         spin_unlock_irqrestore(hrrq->lock, flags);
4886                                 }
4887
4888                                 if (wait)
4889                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4890                                 LEAVE;
4891                                 return wait ? FAILED : SUCCESS;
4892                         }
4893                 }
4894         } while (wait);
4895
4896         LEAVE;
4897         return SUCCESS;
4898 }
4899
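/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/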
4900 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4901 {
4902         struct ipr_ioa_cfg *ioa_cfg;
4903         unsigned long lock_flags = 0;
4904         int rc = SUCCESS;
4905
4906         ENTER;
4907         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4908         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4909
4910         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4911                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4912                 dev_err(&ioa_cfg->pdev->dev,
4913                         "Adapter being reset as a result of error recovery.\n");
4914
4915                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4916                         ioa_cfg->sdt_state = GET_DUMP;
4917         }
4918
4919         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4920         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4921         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4922
4923         /* If a host reset arrived while we were already resetting the
4924          * adapter and that reset failed, fail this host reset too. */
4925         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4926                 ipr_trace;
4927                 rc = FAILED;
4928         }
4929
4930         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4931         LEAVE;
4932         return rc;
4933 }
4934
4935 /**
4936  * ipr_device_reset - Reset the device
4937  * @ioa_cfg:    ioa config struct
4938  * @res:                resource entry struct
4939  *
4940  * This function issues a device reset to the affected device.
4941  * If the device is a SCSI device, a LUN reset will be sent
4942  * to the device first. If that does not work, a target reset
4943  * will be sent. If the device is a SATA device, a PHY reset will
4944  * be sent.
4945  *
4946  * Return value:
4947  *      0 on success / non-zero on failure
4948  **/
4949 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4950                             struct ipr_resource_entry *res)
4951 {
4952         struct ipr_cmnd *ipr_cmd;
4953         struct ipr_ioarcb *ioarcb;
4954         struct ipr_cmd_pkt *cmd_pkt;
4955         struct ipr_ioarcb_ata_regs *regs;
4956         u32 ioasc;
4957
4958         ENTER;
4959         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4960         ioarcb = &ipr_cmd->ioarcb;
4961         cmd_pkt = &ioarcb->cmd_pkt;
4962
4963         if (ipr_cmd->ioa_cfg->sis64) {
4964                 regs = &ipr_cmd->i.ata_ioadl.regs;
4965                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4966         } else
4967                 regs = &ioarcb->u.add_data.u.regs;
4968
4969         ioarcb->res_handle = res->res_handle;
4970         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4971         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4972         if (ipr_is_gata(res)) {
4973                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4974                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4975                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4976         }
4977
4978         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4979         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4980         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
4981         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4982                 if (ipr_cmd->ioa_cfg->sis64)
4983                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4984                                sizeof(struct ipr_ioasa_gata));
4985                 else
4986                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4987                                sizeof(struct ipr_ioasa_gata));
4988         }
4989
4990         LEAVE;
4991         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4992 }
4993
4994 /**
4995  * ipr_sata_reset - Reset the SATA port
4996  * @link:       SATA link to reset
4997  * @classes:    class of the attached device
4998  * @deadline:   unused
4999  * This function issues a SATA phy reset to the affected ATA link.
5000  *
5001  * Return value:
5002  *      0 on success / non-zero on failure
5003  **/
5004 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5005                                 unsigned long deadline)
5006 {
5007         struct ipr_sata_port *sata_port = link->ap->private_data;
5008         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5009         struct ipr_resource_entry *res;
5010         unsigned long lock_flags = 0;
5011         int rc = -ENXIO;
5012
5013         ENTER;
5014         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5015         while (ioa_cfg->in_reset_reload) {
5016                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5017                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5018                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5019         }
5020
5021         res = sata_port->res;
5022         if (res) {
5023                 rc = ipr_device_reset(ioa_cfg, res);
5024                 *classes = res->ata_class;
5025         }
5026
5027         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5028         LEAVE;
5029         return rc;
5030 }
5031
5032 /**
5033  * ipr_eh_dev_reset - Reset the device
5034  * @scsi_cmd:   scsi command struct
5035  *
5036  * This function issues a device reset to the affected device.
5037  * A LUN reset will be sent to the device first. If that does
5038  * not work, a target reset will be sent.
5039  *
5040  * Return value:
5041  *      SUCCESS / FAILED
5042  **/
5043 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5044 {
5045         struct ipr_cmnd *ipr_cmd;
5046         struct ipr_ioa_cfg *ioa_cfg;
5047         struct ipr_resource_entry *res;
5048         struct ata_port *ap;
5049         int rc = 0;
5050         struct ipr_hrr_queue *hrrq;
5051
5052         ENTER;
5053         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5054         res = scsi_cmd->device->hostdata;
5055
5056         if (!res)
5057                 return FAILED;
5058
5059         /*
5060          * If we are currently going through reset/reload, return failed. This will force the
5061          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5062          * reset to complete
5063          */
5064         if (ioa_cfg->in_reset_reload)
5065                 return FAILED;
5066         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5067                 return FAILED;
5068
5069         for_each_hrrq(hrrq, ioa_cfg) {
5070                 spin_lock(&hrrq->_lock);
5071                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5072                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5073                                 if (ipr_cmd->scsi_cmd)
5074                                         ipr_cmd->done = ipr_scsi_eh_done;
5075                                 if (ipr_cmd->qc)
5076                                         ipr_cmd->done = ipr_sata_eh_done;
5077                                 if (ipr_cmd->qc &&
5078                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5079                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5080                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5081                                 }
5082                         }
5083                 }
5084                 spin_unlock(&hrrq->_lock);
5085         }
5086         res->resetting_device = 1;
5087         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5088
5089         if (ipr_is_gata(res) && res->sata_port) {
5090                 ap = res->sata_port->ap;
5091                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5092                 ata_std_error_handler(ap);
5093                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5094
5095                 for_each_hrrq(hrrq, ioa_cfg) {
5096                         spin_lock(&hrrq->_lock);
5097                         list_for_each_entry(ipr_cmd,
5098                                             &hrrq->hrrq_pending_q, queue) {
5099                                 if (ipr_cmd->ioarcb.res_handle ==
5100                                     res->res_handle) {
5101                                         rc = -EIO;
5102                                         break;
5103                                 }
5104                         }
5105                         spin_unlock(&hrrq->_lock);
5106                 }
5107         } else
5108                 rc = ipr_device_reset(ioa_cfg, res);
5109         res->resetting_device = 0;
5110         res->reset_occurred = 1;
5111
5112         LEAVE;
5113         return rc ? FAILED : SUCCESS;
5114 }
5115
5116 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5117 {
5118         int rc;
5119         struct ipr_ioa_cfg *ioa_cfg;
5120
5121         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5122
5123         spin_lock_irq(cmd->device->host->host_lock);
5124         rc = __ipr_eh_dev_reset(cmd);
5125         spin_unlock_irq(cmd->device->host->host_lock);
5126
5127         if (rc == SUCCESS)
5128                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5129
5130         return rc;
5131 }
5132
5133 /**
5134  * ipr_bus_reset_done - Op done function for bus reset.
5135  * @ipr_cmd:    ipr command struct
5136  *
5137  * This function is the op done function for a bus reset
5138  *
5139  * Return value:
5140  *      none
5141  **/
5142 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5143 {
5144         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5145         struct ipr_resource_entry *res;
5146
5147         ENTER;
5148         if (!ioa_cfg->sis64)
5149                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5150                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5151                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5152                                 break;
5153                         }
5154                 }
5155
5156         /*
5157          * If abort has not completed, indicate the reset has, else call the
5158          * abort's done function to wake the sleeping eh thread
5159          */
5160         if (ipr_cmd->sibling->sibling)
5161                 ipr_cmd->sibling->sibling = NULL;
5162         else
5163                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5164
5165         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5166         LEAVE;
5167 }
5168
5169 /**
5170  * ipr_abort_timeout - An abort task has timed out
5171  * @ipr_cmd:    ipr command struct
5172  *
5173  * This function handles when an abort task times out. If this
5174  * happens we issue a bus reset since we have resources tied
5175  * up that must be freed before returning to the midlayer.
5176  *
5177  * Return value:
5178  *      none
5179  **/
5180 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5181 {
5182         struct ipr_cmnd *reset_cmd;
5183         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5184         struct ipr_cmd_pkt *cmd_pkt;
5185         unsigned long lock_flags = 0;
5186
5187         ENTER;
5188         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5189         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5190                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5191                 return;
5192         }
5193
5194         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5195         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5196         ipr_cmd->sibling = reset_cmd;
5197         reset_cmd->sibling = ipr_cmd;
5198         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5199         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5200         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5201         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5202         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5203
5204         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5205         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5206         LEAVE;
5207 }
5208
5209 /**
5210  * ipr_cancel_op - Cancel specified op
5211  * @scsi_cmd:   scsi command struct
5212  *
5213  * This function cancels specified op.
5214  *
5215  * Return value:
5216  *      SUCCESS / FAILED
5217  **/
5218 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5219 {
5220         struct ipr_cmnd *ipr_cmd;
5221         struct ipr_ioa_cfg *ioa_cfg;
5222         struct ipr_resource_entry *res;
5223         struct ipr_cmd_pkt *cmd_pkt;
5224         u32 ioasc, int_reg;
5225         int op_found = 0;
5226         struct ipr_hrr_queue *hrrq;
5227
5228         ENTER;
5229         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5230         res = scsi_cmd->device->hostdata;
5231
5232         /* If we are currently going through reset/reload, return failed.
5233          * This will force the mid-layer to call ipr_eh_host_reset,
5234          * which will then go to sleep and wait for the reset to complete
5235          */
5236         if (ioa_cfg->in_reset_reload ||
5237             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5238                 return FAILED;
5239         if (!res)
5240                 return FAILED;
5241
5242         /*
5243          * If we are aborting a timed out op, chances are that the timeout was caused
5244          * by a still not detected EEH error. In such cases, reading a register will
5245          * trigger the EEH recovery infrastructure.
5246          */
5247         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5248
5249         if (!ipr_is_gscsi(res))
5250                 return FAILED;
5251
5252         for_each_hrrq(hrrq, ioa_cfg) {
5253                 spin_lock(&hrrq->_lock);
5254                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5255                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5256                                 ipr_cmd->done = ipr_scsi_eh_done;
5257                                 op_found = 1;
5258                                 break;
5259                         }
5260                 }
5261                 spin_unlock(&hrrq->_lock);
5262         }
5263
5264         if (!op_found)
5265                 return SUCCESS;
5266
5267         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5268         ipr_cmd->ioarcb.res_handle = res->res_handle;
5269         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5270         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5271         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5272         ipr_cmd->u.sdev = scsi_cmd->device;
5273
5274         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5275                     scsi_cmd->cmnd[0]);
5276         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5277         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5278
5279         /*
5280          * If the abort task timed out and we sent a bus reset, we will get
5281          * one of the following responses to the abort.
5282          */
5283         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5284                 ioasc = 0;
5285                 ipr_trace;
5286         }
5287
5288         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5289         if (!ipr_is_naca_model(res))
5290                 res->needs_sync_complete = 1;
5291
5292         LEAVE;
5293         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5294 }
5295
5296 /**
5297  * ipr_scan_finished - Report whether the device scan is complete
5298  * @shost:      scsi host struct
5299  * @elapsed_time:       elapsed scan time in jiffies
5300  * Return value:
5301  *      0 if scan in progress / 1 if scan is complete
5302  **/
5303 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5304 {
5305         unsigned long lock_flags;
5306         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5307         int rc = 0;
5308
5309         spin_lock_irqsave(shost->host_lock, lock_flags);
5310         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5311                 rc = 1;
5312         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5313                 rc = 1;
5314         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5315         return rc;
5316 }
5317
5318 /**
5319  * ipr_eh_abort - Abort a single op
5320  * @scsi_cmd:   scsi command struct
5321  *
5322  * Return value:
5323  *      SUCCESS / FAILED
5324  **/
5325 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5326 {
5327         unsigned long flags;
5328         int rc;
5329         struct ipr_ioa_cfg *ioa_cfg;
5330
5331         ENTER;
5332
5333         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5334
5335         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5336         rc = ipr_cancel_op(scsi_cmd);
5337         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5338
5339         if (rc == SUCCESS)
5340                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5341         LEAVE;
5342         return rc;
5343 }
5344
5345 /**
5346  * ipr_handle_other_interrupt - Handle "other" interrupts
5347  * @ioa_cfg:    ioa config struct
5348  * @int_reg:    interrupt register
5349  *
5350  * Return value:
5351  *      IRQ_NONE / IRQ_HANDLED
5352  **/
5353 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5354                                               u32 int_reg)
5355 {
5356         irqreturn_t rc = IRQ_HANDLED;
5357         u32 int_mask_reg;
5358
5359         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5360         int_reg &= ~int_mask_reg;
5361
5362         /* If an interrupt on the adapter did not occur, ignore it.
5363          * Or in the case of SIS 64, check for a stage change interrupt.
5364          */
5365         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5366                 if (ioa_cfg->sis64) {
5367                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5368                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5369                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5370
5371                                 /* clear stage change */
5372                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5373                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5374                                 list_del(&ioa_cfg->reset_cmd->queue);
5375                                 del_timer(&ioa_cfg->reset_cmd->timer);
5376                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5377                                 return IRQ_HANDLED;
5378                         }
5379                 }
5380
5381                 return IRQ_NONE;
5382         }
5383
5384         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5385                 /* Mask the interrupt */
5386                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5387                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5388
5389                 list_del(&ioa_cfg->reset_cmd->queue);
5390                 del_timer(&ioa_cfg->reset_cmd->timer);
5391                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5392         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5393                 if (ioa_cfg->clear_isr) {
5394                         if (ipr_debug && printk_ratelimit())
5395                                 dev_err(&ioa_cfg->pdev->dev,
5396                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5397                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5398                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5399                         return IRQ_NONE;
5400                 }
5401         } else {
5402                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5403                         ioa_cfg->ioa_unit_checked = 1;
5404                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5405                         dev_err(&ioa_cfg->pdev->dev,
5406                                 "No Host RRQ. 0x%08X\n", int_reg);
5407                 else
5408                         dev_err(&ioa_cfg->pdev->dev,
5409                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5410
5411                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5412                         ioa_cfg->sdt_state = GET_DUMP;
5413
5414                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5415                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5416         }
5417
5418         return rc;
5419 }
5420
5421 /**
5422  * ipr_isr_eh - Interrupt service routine error handler
5423  * @ioa_cfg:    ioa config struct
5424  * @msg:        message to log
5425  * @number:     number to log with the message
5426  * Return value:
5427  *      none
5428  **/
5429 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5430 {
5431         ioa_cfg->errors_logged++;
5432         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5433
5434         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5435                 ioa_cfg->sdt_state = GET_DUMP;
5436
5437         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5438 }
5439
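/**
 * ipr_process_hrrq - Pull completed responses off an HRR queue
 * @hrr_queue:  host request response queue to process
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list on which to collect completed commands
 *
 * Return value:
 *      number of responses processed
 **/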
5440 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5441                                                 struct list_head *doneq)
5442 {
5443         u32 ioasc;
5444         u16 cmd_index;
5445         struct ipr_cmnd *ipr_cmd;
5446         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5447         int num_hrrq = 0;
5448
5449         /* If interrupts are disabled, ignore the interrupt */
5450         if (!hrr_queue->allow_interrupts)
5451                 return 0;
5452
5453         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5454                hrr_queue->toggle_bit) {
5455
5456                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5457                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5458                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5459
5460                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5461                              cmd_index < hrr_queue->min_cmd_id)) {
5462                         ipr_isr_eh(ioa_cfg,
5463                                 "Invalid response handle from IOA:",
5464                                 cmd_index);
5465                         break;
5466                 }
5467
5468                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5469                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5470
5471                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5472
5473                 list_move_tail(&ipr_cmd->queue, doneq);
5474
5475                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5476                         hrr_queue->hrrq_curr++;
5477                 } else {
5478                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5479                         hrr_queue->toggle_bit ^= 1u;
5480                 }
5481                 num_hrrq++;
5482                 if (budget > 0 && num_hrrq >= budget)
5483                         break;
5484         }
5485
5486         return num_hrrq;
5487 }
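
/*
 * Worked example of the handle decode above (assuming the ipr.h
 * encoding: response handle in bits 2..31, toggle in bit 0): a raw
 * HRRQ entry of 0x0000004D carries toggle bit 1 and decodes to
 * cmd_index (0x0000004D & 0xFFFFFFFC) >> 2 = 0x13, i.e. entry 19 of
 * ipr_cmnd_list. When hrrq_curr wraps back to hrrq_start, toggle_bit
 * is flipped so stale entries from the previous lap no longer match.
 */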
5488
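/**
 * ipr_iopoll - Process responses in blk_iopoll (polled) mode
 * @iop:        blk_iopoll struct for this HRR queue
 * @budget:     maximum number of responses to process this poll cycle
 *
 * Return value:
 *      number of responses processed
 **/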
5489 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5490 {
5491         struct ipr_ioa_cfg *ioa_cfg;
5492         struct ipr_hrr_queue *hrrq;
5493         struct ipr_cmnd *ipr_cmd, *temp;
5494         unsigned long hrrq_flags;
5495         int completed_ops;
5496         LIST_HEAD(doneq);
5497
5498         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5499         ioa_cfg = hrrq->ioa_cfg;
5500
5501         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5502         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5503
5504         if (completed_ops < budget)
5505                 blk_iopoll_complete(iop);
5506         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5507
5508         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509                 list_del(&ipr_cmd->queue);
5510                 del_timer(&ipr_cmd->timer);
5511                 ipr_cmd->fast_done(ipr_cmd);
5512         }
5513
5514         return completed_ops;
5515 }
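
/*
 * Polled completion is controlled from sysfs (illustrative; "host0" and
 * the attribute name are assumptions based on ipr_iopoll_weight_attr):
 *
 *   # echo 64 > /sys/class/scsi_host/host0/iopoll_weight   (poll, budget 64)
 *   # echo 0  > /sys/class/scsi_host/host0/iopoll_weight   (interrupt mode)
 */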
5516
5517 /**
5518  * ipr_isr - Interrupt service routine
5519  * @irq:        irq number
5520  * @devp:       pointer to ioa config struct
5521  *
5522  * Return value:
5523  *      IRQ_NONE / IRQ_HANDLED
5524  **/
5525 static irqreturn_t ipr_isr(int irq, void *devp)
5526 {
5527         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5528         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5529         unsigned long hrrq_flags = 0;
5530         u32 int_reg = 0;
5531         int num_hrrq = 0;
5532         int irq_none = 0;
5533         struct ipr_cmnd *ipr_cmd, *temp;
5534         irqreturn_t rc = IRQ_NONE;
5535         LIST_HEAD(doneq);
5536
5537         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5538         /* If interrupts are disabled, ignore the interrupt */
5539         if (!hrrq->allow_interrupts) {
5540                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5541                 return IRQ_NONE;
5542         }
5543
5544         while (1) {
5545                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5546                         rc = IRQ_HANDLED;
5547
5548                         if (!ioa_cfg->clear_isr)
5549                                 break;
5550
5551                         /* Clear the PCI interrupt */
5552                         num_hrrq = 0;
5553                         do {
5554                                 writel(IPR_PCII_HRRQ_UPDATED,
5555                                      ioa_cfg->regs.clr_interrupt_reg32);
5556                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5557                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5558                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5559
5560                 } else if (rc == IRQ_NONE && irq_none == 0) {
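                        /*
                         * The HRRQ was empty; sample the interrupt register
                         * once so ipr_handle_other_interrupt() below can see
                         * why we were interrupted.
                         */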
5561                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5562                         irq_none++;
5563                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5564                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5565                         ipr_isr_eh(ioa_cfg,
5566                                 "Error clearing HRRQ: ", num_hrrq);
5567                         rc = IRQ_HANDLED;
5568                         break;
5569                 } else
5570                         break;
5571         }
5572
5573         if (unlikely(rc == IRQ_NONE))
5574                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5575
5576         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5577         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5578                 list_del(&ipr_cmd->queue);
5579                 del_timer(&ipr_cmd->timer);
5580                 ipr_cmd->fast_done(ipr_cmd);
5581         }
5582         return rc;
5583 }
5584
5585 /**
5586  * ipr_isr_mhrrq - Interrupt service routine
5587  * @irq:        irq number
5588  * @devp:       pointer to hrr queue struct
5589  *
5590  * Return value:
5591  *      IRQ_NONE / IRQ_HANDLED
5592  **/
5593 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5594 {
5595         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5596         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5597         unsigned long hrrq_flags = 0;
5598         struct ipr_cmnd *ipr_cmd, *temp;
5599         irqreturn_t rc = IRQ_NONE;
5600         LIST_HEAD(doneq);
5601
5602         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5603
5604         /* If interrupts are disabled, ignore the interrupt */
5605         if (!hrrq->allow_interrupts) {
5606                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5607                 return IRQ_NONE;
5608         }
5609
5610         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5611                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5612                        hrrq->toggle_bit) {
5613                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5614                                 blk_iopoll_sched(&hrrq->iopoll);
5615                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5616                         return IRQ_HANDLED;
5617                 }
5618         } else {
5619                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5620                         hrrq->toggle_bit)
5621
5622                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5623                                 rc = IRQ_HANDLED;
5624         }
5625
5626         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5627
5628         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5629                 list_del(&ipr_cmd->queue);
5630                 del_timer(&ipr_cmd->timer);
5631                 ipr_cmd->fast_done(ipr_cmd);
5632         }
5633         return rc;
5634 }
5635
5636 /**
5637  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5638  * @ioa_cfg:    ioa config struct
5639  * @ipr_cmd:    ipr command struct
5640  *
5641  * Return value:
5642  *      0 on success / -1 on failure
5643  **/
5644 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5645                              struct ipr_cmnd *ipr_cmd)
5646 {
5647         int i, nseg;
5648         struct scatterlist *sg;
5649         u32 length;
5650         u32 ioadl_flags = 0;
5651         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5652         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5653         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5654
5655         length = scsi_bufflen(scsi_cmd);
5656         if (!length)
5657                 return 0;
5658
5659         nseg = scsi_dma_map(scsi_cmd);
5660         if (nseg < 0) {
5661                 if (printk_ratelimit())
5662                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5663                 return -1;
5664         }
5665
5666         ipr_cmd->dma_use_sg = nseg;
5667
5668         ioarcb->data_transfer_length = cpu_to_be32(length);
5669         ioarcb->ioadl_len =
5670                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5671
5672         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5673                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5674                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5675         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5676                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5677
5678         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5679                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5680                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5681                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5682         }
5683
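        /* Flag the final descriptor so the IOA knows where the list ends */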
5684         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5685         return 0;
5686 }
5687
5688 /**
5689  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5690  * @ioa_cfg:    ioa config struct
5691  * @ipr_cmd:    ipr command struct
5692  *
5693  * Return value:
5694  *      0 on success / -1 on failure
5695  **/
5696 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5697                            struct ipr_cmnd *ipr_cmd)
5698 {
5699         int i, nseg;
5700         struct scatterlist *sg;
5701         u32 length;
5702         u32 ioadl_flags = 0;
5703         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5704         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5705         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5706
5707         length = scsi_bufflen(scsi_cmd);
5708         if (!length)
5709                 return 0;
5710
5711         nseg = scsi_dma_map(scsi_cmd);
5712         if (nseg < 0) {
5713                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5714                 return -1;
5715         }
5716
5717         ipr_cmd->dma_use_sg = nseg;
5718
5719         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5720                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5721                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5722                 ioarcb->data_transfer_length = cpu_to_be32(length);
5723                 ioarcb->ioadl_len =
5724                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5725         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5726                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5727                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5728                 ioarcb->read_ioadl_len =
5729                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5730         }
5731
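        /*
         * If the list fits in the IOARCB's add_data area, use that
         * inline IOADL and spare the adapter a separate DMA fetch.
         */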
5732         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5733                 ioadl = ioarcb->u.add_data.u.ioadl;
5734                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5735                                     offsetof(struct ipr_ioarcb, u.add_data));
5736                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5737         }
5738
5739         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5740                 ioadl[i].flags_and_data_len =
5741                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5742                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5743         }
5744
5745         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5746         return 0;
5747 }
5748
5749 /**
5750  * ipr_erp_done - Process completion of ERP for a device
5751  * @ipr_cmd:            ipr command struct
5752  *
5753  * This function copies the sense buffer into the scsi_cmd
5754  * struct and calls the scsi_done function.
5755  *
5756  * Return value:
5757  *      nothing
5758  **/
5759 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5760 {
5761         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5762         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5763         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5764
5765         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5766                 scsi_cmd->result |= (DID_ERROR << 16);
5767                 scmd_printk(KERN_ERR, scsi_cmd,
5768                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5769         } else {
5770                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5771                        SCSI_SENSE_BUFFERSIZE);
5772         }
5773
5774         if (res) {
5775                 if (!ipr_is_naca_model(res))
5776                         res->needs_sync_complete = 1;
5777                 res->in_erp = 0;
5778         }
5779         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5780         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5781         scsi_cmd->scsi_done(scsi_cmd);
5782 }
5783
5784 /**
5785  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5786  * @ipr_cmd:    ipr command struct
5787  *
5788  * Return value:
5789  *      none
5790  **/
5791 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5792 {
5793         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5794         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5795         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5796
5797         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5798         ioarcb->data_transfer_length = 0;
5799         ioarcb->read_data_transfer_length = 0;
5800         ioarcb->ioadl_len = 0;
5801         ioarcb->read_ioadl_len = 0;
5802         ioasa->hdr.ioasc = 0;
5803         ioasa->hdr.residual_data_len = 0;
5804
5805         if (ipr_cmd->ioa_cfg->sis64)
5806                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5807                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5808         else {
5809                 ioarcb->write_ioadl_addr =
5810                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5811                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5812         }
5813 }
5814
5815 /**
5816  * ipr_erp_request_sense - Send request sense to a device
5817  * @ipr_cmd:    ipr command struct
5818  *
5819  * This function sends a request sense to a device as a result
5820  * of a check condition.
5821  *
5822  * Return value:
5823  *      nothing
5824  **/
5825 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5826 {
5827         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5828         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5829
5830         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5831                 ipr_erp_done(ipr_cmd);
5832                 return;
5833         }
5834
5835         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5836
5837         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
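        /* Build a 6-byte REQUEST SENSE CDB; byte 4 is the allocation length */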
5838         cmd_pkt->cdb[0] = REQUEST_SENSE;
5839         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5840         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5841         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5842         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5843
5844         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5845                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5846
5847         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5848                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5849 }
5850
5851 /**
5852  * ipr_erp_cancel_all - Send cancel all to a device
5853  * @ipr_cmd:    ipr command struct
5854  *
5855  * This function sends a cancel all to a device to clear the
5856  * queue. If we are running TCQ on the device, QERR is set to 1,
5857  * which means all outstanding ops have been dropped on the floor.
5858  * Cancel all will return them to us.
5859  *
5860  * Return value:
5861  *      nothing
5862  **/
5863 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5864 {
5865         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5866         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5867         struct ipr_cmd_pkt *cmd_pkt;
5868
5869         res->in_erp = 1;
5870
5871         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5872
5873         if (!scsi_cmd->device->simple_tags) {
5874                 ipr_erp_request_sense(ipr_cmd);
5875                 return;
5876         }
5877
5878         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5879         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5880         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5881
5882         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5883                    IPR_CANCEL_ALL_TIMEOUT);
5884 }
5885
5886 /**
5887  * ipr_dump_ioasa - Dump contents of IOASA
5888  * @ioa_cfg:    ioa config struct
5889  * @ipr_cmd:    ipr command struct
5890  * @res:                resource entry struct
5891  *
5892  * This function is invoked by the interrupt handler when ops
5893  * fail. It will log the IOASA if appropriate. Only called
5894  * for GPDD ops.
5895  *
5896  * Return value:
5897  *      none
5898  **/
5899 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5900                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5901 {
5902         int i;
5903         u16 data_len;
5904         u32 ioasc, fd_ioasc;
5905         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5906         __be32 *ioasa_data = (__be32 *)ioasa;
5907         int error_index;
5908
5909         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5910         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5911
5912         if (0 == ioasc)
5913                 return;
5914
5915         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5916                 return;
5917
5918         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5919                 error_index = ipr_get_error(fd_ioasc);
5920         else
5921                 error_index = ipr_get_error(ioasc);
5922
5923         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5924                 /* Don't log an error if the IOA already logged one */
5925                 if (ioasa->hdr.ilid != 0)
5926                         return;
5927
5928                 if (!ipr_is_gscsi(res))
5929                         return;
5930
5931                 if (ipr_error_table[error_index].log_ioasa == 0)
5932                         return;
5933         }
5934
5935         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5936
5937         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5938         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5939                 data_len = sizeof(struct ipr_ioasa64);
5940         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5941                 data_len = sizeof(struct ipr_ioasa);
5942
5943         ipr_err("IOASA Dump:\n");
5944
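        /* i indexes 32-bit words: each line prints four words (16 bytes), with the offset shown in bytes */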
5945         for (i = 0; i < data_len / 4; i += 4) {
5946                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5947                         be32_to_cpu(ioasa_data[i]),
5948                         be32_to_cpu(ioasa_data[i+1]),
5949                         be32_to_cpu(ioasa_data[i+2]),
5950                         be32_to_cpu(ioasa_data[i+3]));
5951         }
5952 }
5953
5954 /**
5955  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5956  * @ipr_cmd:    ipr command struct (contains the IOASA and the
5957  *              scsi_cmd whose sense buffer is filled in)
5958  *
5959  * Return value:
5960  *      none
5961  **/
5962 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5963 {
5964         u32 failing_lba;
5965         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5966         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5967         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5968         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5969
5970         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5971
5972         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5973                 return;
5974
5975         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5976
5977         if (ipr_is_vset_device(res) &&
5978             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5979             ioasa->u.vset.failing_lba_hi != 0) {
5980                 sense_buf[0] = 0x72;
5981                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5982                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5983                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5984
5985                 sense_buf[7] = 12;
5986                 sense_buf[8] = 0;
5987                 sense_buf[9] = 0x0A;
5988                 sense_buf[10] = 0x80;
5989
5990                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5991
5992                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5993                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5994                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5995                 sense_buf[15] = failing_lba & 0x000000ff;
5996
5997                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5998
5999                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6000                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6001                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6002                 sense_buf[19] = failing_lba & 0x000000ff;
6003         } else {
6004                 sense_buf[0] = 0x70;
6005                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6006                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6007                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6008
6009                 /* Illegal request */
6010                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6011                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6012                         sense_buf[7] = 10;      /* additional length */
6013
6014                         /* IOARCB was in error */
6015                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6016                                 sense_buf[15] = 0xC0;
6017                         else    /* Parameter data was invalid */
6018                                 sense_buf[15] = 0x80;
6019
6020                         sense_buf[16] =
6021                             ((IPR_FIELD_POINTER_MASK &
6022                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6023                         sense_buf[17] =
6024                             (IPR_FIELD_POINTER_MASK &
6025                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6026                 } else {
6027                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6028                                 if (ipr_is_vset_device(res))
6029                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6030                                 else
6031                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6032
6033                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6034                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6035                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6036                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6037                                 sense_buf[6] = failing_lba & 0x000000ff;
6038                         }
6039
6040                         sense_buf[7] = 6;       /* additional length */
6041                 }
6042         }
6043 }
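
/*
 * Worked example (illustrative): for a vset device failing with
 * IPR_IOASC_MED_DO_NOT_REALLOC at failing_lba_hi = 0x00000001 and
 * failing_lba_lo = 0x23456789, the descriptor-format sense built above
 * carries an information descriptor (type 0x00, VALID bit set in byte
 * 10) whose bytes 12-19 read 00 00 00 01 23 45 67 89, i.e. the
 * big-endian 64-bit LBA 0x0000000123456789.
 */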
6044
6045 /**
6046  * ipr_get_autosense - Copy autosense data to sense buffer
6047  * @ipr_cmd:    ipr command struct
6048  *
6049  * This function copies the autosense buffer to the buffer
6050  * in the scsi_cmd, if there is autosense available.
6051  *
6052  * Return value:
6053  *      1 if autosense was available / 0 if not
6054  **/
6055 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6056 {
6057         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6058         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6059
6060         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6061                 return 0;
6062
6063         if (ipr_cmd->ioa_cfg->sis64)
6064                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6065                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6066                            SCSI_SENSE_BUFFERSIZE));
6067         else
6068                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6069                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6070                            SCSI_SENSE_BUFFERSIZE));
6071         return 1;
6072 }
6073
6074 /**
6075  * ipr_erp_start - Process an error response for a SCSI op
6076  * @ioa_cfg:    ioa config struct
6077  * @ipr_cmd:    ipr command struct
6078  *
6079  * This function determines whether or not to initiate ERP
6080  * on the affected device.
6081  *
6082  * Return value:
6083  *      nothing
6084  **/
6085 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6086                               struct ipr_cmnd *ipr_cmd)
6087 {
6088         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6089         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6090         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6091         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6092
6093         if (!res) {
6094                 ipr_scsi_eh_done(ipr_cmd);
6095                 return;
6096         }
6097
6098         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6099                 ipr_gen_sense(ipr_cmd);
6100
6101         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6102
6103         switch (masked_ioasc) {
6104         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6105                 if (ipr_is_naca_model(res))
6106                         scsi_cmd->result |= (DID_ABORT << 16);
6107                 else
6108                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6109                 break;
6110         case IPR_IOASC_IR_RESOURCE_HANDLE:
6111         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6112                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6113                 break;
6114         case IPR_IOASC_HW_SEL_TIMEOUT:
6115                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6116                 if (!ipr_is_naca_model(res))
6117                         res->needs_sync_complete = 1;
6118                 break;
6119         case IPR_IOASC_SYNC_REQUIRED:
6120                 if (!res->in_erp)
6121                         res->needs_sync_complete = 1;
6122                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6123                 break;
6124         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6125         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6126                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6127                 break;
6128         case IPR_IOASC_BUS_WAS_RESET:
6129         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6130                 /*
6131                  * Report the bus reset and ask for a retry. The device
6132                  * will return a CC/UA on the next command.
6133                  */
6134                 if (!res->resetting_device)
6135                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6136                 scsi_cmd->result |= (DID_ERROR << 16);
6137                 if (!ipr_is_naca_model(res))
6138                         res->needs_sync_complete = 1;
6139                 break;
6140         case IPR_IOASC_HW_DEV_BUS_STATUS:
6141                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6142                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6143                         if (!ipr_get_autosense(ipr_cmd)) {
6144                                 if (!ipr_is_naca_model(res)) {
6145                                         ipr_erp_cancel_all(ipr_cmd);
6146                                         return;
6147                                 }
6148                         }
6149                 }
6150                 if (!ipr_is_naca_model(res))
6151                         res->needs_sync_complete = 1;
6152                 break;
6153         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6154                 break;
6155         default:
6156                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6157                         scsi_cmd->result |= (DID_ERROR << 16);
6158                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6159                         res->needs_sync_complete = 1;
6160                 break;
6161         }
6162
6163         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6164         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6165         scsi_cmd->scsi_done(scsi_cmd);
6166 }
6167
6168 /**
6169  * ipr_scsi_done - mid-layer done function
6170  * @ipr_cmd:    ipr command struct
6171  *
6172  * This function is invoked by the interrupt handler for
6173  * ops generated by the SCSI mid-layer
6174  *
6175  * Return value:
6176  *      none
6177  **/
6178 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6179 {
6180         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6181         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6182         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6183         unsigned long hrrq_flags;
6184
6185         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6186
6187         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6188                 scsi_dma_unmap(scsi_cmd);
6189
6190                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6191                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6192                 scsi_cmd->scsi_done(scsi_cmd);
6193                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6194         } else {
6195                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6196                 ipr_erp_start(ioa_cfg, ipr_cmd);
6197                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6198         }
6199 }
6200
6201 /**
6202  * ipr_queuecommand - Queue a mid-layer request
6203  * @shost:              scsi host struct
6204  * @scsi_cmd:   scsi command struct
6205  *
6206  * This function queues a request generated by the mid-layer.
6207  *
6208  * Return value:
6209  *      0 on success
6210  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6211  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6212  **/
6213 static int ipr_queuecommand(struct Scsi_Host *shost,
6214                             struct scsi_cmnd *scsi_cmd)
6215 {
6216         struct ipr_ioa_cfg *ioa_cfg;
6217         struct ipr_resource_entry *res;
6218         struct ipr_ioarcb *ioarcb;
6219         struct ipr_cmnd *ipr_cmd;
6220         unsigned long hrrq_flags, lock_flags;
6221         int rc;
6222         struct ipr_hrr_queue *hrrq;
6223         int hrrq_id;
6224
6225         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6226
6227         scsi_cmd->result = (DID_OK << 16);
6228         res = scsi_cmd->device->hostdata;
6229
6230         if (ipr_is_gata(res) && res->sata_port) {
6231                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6232                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6233                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6234                 return rc;
6235         }
6236
6237         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6238         hrrq = &ioa_cfg->hrrq[hrrq_id];
6239
6240         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6241         /*
6242          * We are currently blocking all devices due to a host reset.
6243          * We have told the host to stop giving us new requests, but
6244          * ERP ops don't count. FIXME
6245          */
6246         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6247                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6248                 return SCSI_MLQUEUE_HOST_BUSY;
6249         }
6250
6251         /*
6252          * FIXME - Create scsi_set_host_offline interface
6253          *  and the ioa_is_dead check can be removed
6254          */
6255         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6256                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6257                 goto err_nodev;
6258         }
6259
6260         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6261         if (ipr_cmd == NULL) {
6262                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6263                 return SCSI_MLQUEUE_HOST_BUSY;
6264         }
6265         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6266
6267         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6268         ioarcb = &ipr_cmd->ioarcb;
6269
6270         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6271         ipr_cmd->scsi_cmd = scsi_cmd;
6272         ipr_cmd->done = ipr_scsi_eh_done;
6273
6274         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6275                 if (scsi_cmd->underflow == 0)
6276                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6277
6278                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6279                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6280                         res->reset_occurred = 0;
6281                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6282                 }
6283                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6284                 if (scsi_cmd->flags & SCMD_TAGGED)
6285                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6286                 else
6287                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6288         }
6289
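        /*
         * CDB opcodes 0xC0 and above are vendor specific: route them to
         * the IOA itself, except on generic SCSI devices where only
         * IPR_QUERY_RSRC_STATE is treated this way.
         */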
6290         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6291             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6292                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6293         }
6294
6295         if (ioa_cfg->sis64)
6296                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6297         else
6298                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6299
6300         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6301         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6302                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6303                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6304                 if (!rc)
6305                         scsi_dma_unmap(scsi_cmd);
6306                 return SCSI_MLQUEUE_HOST_BUSY;
6307         }
6308
6309         if (unlikely(hrrq->ioa_is_dead)) {
6310                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6311                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6312                 scsi_dma_unmap(scsi_cmd);
6313                 goto err_nodev;
6314         }
6315
6316         ioarcb->res_handle = res->res_handle;
6317         if (res->needs_sync_complete) {
6318                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6319                 res->needs_sync_complete = 0;
6320         }
6321         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6322         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6323         ipr_send_command(ipr_cmd);
6324         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6325         return 0;
6326
6327 err_nodev:
6328         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6329         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6330         scsi_cmd->result = (DID_NO_CONNECT << 16);
6331         scsi_cmd->scsi_done(scsi_cmd);
6332         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6333         return 0;
6334 }
6335
6336 /**
6337  * ipr_ioctl - IOCTL handler
6338  * @sdev:       scsi device struct
6339  * @cmd:        IOCTL cmd
6340  * @arg:        IOCTL arg
6341  *
6342  * Return value:
6343  *      0 on success / other on failure
6344  **/
6345 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6346 {
6347         struct ipr_resource_entry *res;
6348
6349         res = (struct ipr_resource_entry *)sdev->hostdata;
6350         if (res && ipr_is_gata(res)) {
6351                 if (cmd == HDIO_GET_IDENTITY)
6352                         return -ENOTTY;
6353                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6354         }
6355
6356         return -EINVAL;
6357 }
6358
6359 /**
6360  * ipr_ioa_info - Get information about the card/driver
6361  * @host:       scsi host struct
6362  *
6363  * Return value:
6364  *      pointer to buffer with description string
6365  **/
6366 static const char *ipr_ioa_info(struct Scsi_Host *host)
6367 {
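        /* Shared static buffer: contents are only stable until the next call */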
6368         static char buffer[512];
6369         struct ipr_ioa_cfg *ioa_cfg;
6370         unsigned long lock_flags = 0;
6371
6372         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6373
6374         spin_lock_irqsave(host->host_lock, lock_flags);
6375         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6376         spin_unlock_irqrestore(host->host_lock, lock_flags);
6377
6378         return buffer;
6379 }
6380
6381 static struct scsi_host_template driver_template = {
6382         .module = THIS_MODULE,
6383         .name = "IPR",
6384         .info = ipr_ioa_info,
6385         .ioctl = ipr_ioctl,
6386         .queuecommand = ipr_queuecommand,
6387         .eh_abort_handler = ipr_eh_abort,
6388         .eh_device_reset_handler = ipr_eh_dev_reset,
6389         .eh_host_reset_handler = ipr_eh_host_reset,
6390         .slave_alloc = ipr_slave_alloc,
6391         .slave_configure = ipr_slave_configure,
6392         .slave_destroy = ipr_slave_destroy,
6393         .scan_finished = ipr_scan_finished,
6394         .target_alloc = ipr_target_alloc,
6395         .target_destroy = ipr_target_destroy,
6396         .change_queue_depth = ipr_change_queue_depth,
6397         .bios_param = ipr_biosparam,
6398         .can_queue = IPR_MAX_COMMANDS,
6399         .this_id = -1,
6400         .sg_tablesize = IPR_MAX_SGLIST,
6401         .max_sectors = IPR_IOA_MAX_SECTORS,
6402         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6403         .use_clustering = ENABLE_CLUSTERING,
6404         .shost_attrs = ipr_ioa_attrs,
6405         .sdev_attrs = ipr_dev_attrs,
6406         .proc_name = IPR_NAME,
6407         .no_write_same = 1,
6408         .use_blk_tags = 1,
6409 };
6410
6411 /**
6412  * ipr_ata_phy_reset - libata phy_reset handler
6413  * @ap:         ata port to reset
6414  *
6415  **/
6416 static void ipr_ata_phy_reset(struct ata_port *ap)
6417 {
6418         unsigned long flags;
6419         struct ipr_sata_port *sata_port = ap->private_data;
6420         struct ipr_resource_entry *res = sata_port->res;
6421         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6422         int rc;
6423
6424         ENTER;
6425         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6426         while (ioa_cfg->in_reset_reload) {
6427                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6428                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6429                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6430         }
6431
6432         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6433                 goto out_unlock;
6434
6435         rc = ipr_device_reset(ioa_cfg, res);
6436
6437         if (rc) {
6438                 ap->link.device[0].class = ATA_DEV_NONE;
6439                 goto out_unlock;
6440         }
6441
6442         ap->link.device[0].class = res->ata_class;
6443         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6444                 ap->link.device[0].class = ATA_DEV_NONE;
6445
6446 out_unlock:
6447         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6448         LEAVE;
6449 }
6450
6451 /**
6452  * ipr_ata_post_internal - Cleanup after an internal command
6453  * @qc: ATA queued command
6454  *
6455  * Return value:
6456  *      none
6457  **/
6458 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6459 {
6460         struct ipr_sata_port *sata_port = qc->ap->private_data;
6461         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6462         struct ipr_cmnd *ipr_cmd;
6463         struct ipr_hrr_queue *hrrq;
6464         unsigned long flags;
6465
6466         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6467         while (ioa_cfg->in_reset_reload) {
6468                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6469                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6470                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6471         }
6472
6473         for_each_hrrq(hrrq, ioa_cfg) {
6474                 spin_lock(&hrrq->_lock);
6475                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6476                         if (ipr_cmd->qc == qc) {
6477                                 ipr_device_reset(ioa_cfg, sata_port->res);
6478                                 break;
6479                         }
6480                 }
6481                 spin_unlock(&hrrq->_lock);
6482         }
6483         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6484 }
6485
6486 /**
6487  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6488  * @regs:       destination
6489  * @tf: source ATA taskfile
6490  *
6491  * Return value:
6492  *      none
6493  **/
6494 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6495                              struct ata_taskfile *tf)
6496 {
6497         regs->feature = tf->feature;
6498         regs->nsect = tf->nsect;
6499         regs->lbal = tf->lbal;
6500         regs->lbam = tf->lbam;
6501         regs->lbah = tf->lbah;
6502         regs->device = tf->device;
6503         regs->command = tf->command;
6504         regs->hob_feature = tf->hob_feature;
6505         regs->hob_nsect = tf->hob_nsect;
6506         regs->hob_lbal = tf->hob_lbal;
6507         regs->hob_lbam = tf->hob_lbam;
6508         regs->hob_lbah = tf->hob_lbah;
6509         regs->ctl = tf->ctl;
6510 }
6511
6512 /**
6513  * ipr_sata_done - done function for SATA commands
6514  * @ipr_cmd:    ipr command struct
6515  *
6516  * This function is invoked by the interrupt handler for
6517  * ops generated by the SCSI mid-layer to SATA devices
6518  *
6519  * Return value:
6520  *      none
6521  **/
6522 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6523 {
6524         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6525         struct ata_queued_cmd *qc = ipr_cmd->qc;
6526         struct ipr_sata_port *sata_port = qc->ap->private_data;
6527         struct ipr_resource_entry *res = sata_port->res;
6528         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6529
6530         spin_lock(&ipr_cmd->hrrq->_lock);
6531         if (ipr_cmd->ioa_cfg->sis64)
6532                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6533                        sizeof(struct ipr_ioasa_gata));
6534         else
6535                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6536                        sizeof(struct ipr_ioasa_gata));
6537         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6538
6539         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6540                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6541
6542         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6543                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6544         else
6545                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6546         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6547         spin_unlock(&ipr_cmd->hrrq->_lock);
6548         ata_qc_complete(qc);
6549 }
6550
6551 /**
6552  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6553  * @ipr_cmd:    ipr command struct
6554  * @qc:         ATA queued command
6555  *
6556  **/
6557 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6558                                   struct ata_queued_cmd *qc)
6559 {
6560         u32 ioadl_flags = 0;
6561         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6562         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6563         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6564         int len = qc->nbytes;
6565         struct scatterlist *sg;
6566         unsigned int si;
6567         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6568
6569         if (len == 0)
6570                 return;
6571
6572         if (qc->dma_dir == DMA_TO_DEVICE) {
6573                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6574                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6575         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6576                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6577
6578         ioarcb->data_transfer_length = cpu_to_be32(len);
6579         ioarcb->ioadl_len =
6580                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6581         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6582                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6583
6584         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6585                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6586                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6587                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6588
6589                 last_ioadl64 = ioadl64;
6590                 ioadl64++;
6591         }
6592
6593         if (likely(last_ioadl64))
6594                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6595 }
6596
6597 /**
6598  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6599  * @ipr_cmd:    ipr command struct
6600  * @qc:         ATA queued command
6601  *
6602  **/
6603 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6604                                 struct ata_queued_cmd *qc)
6605 {
6606         u32 ioadl_flags = 0;
6607         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6608         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6609         struct ipr_ioadl_desc *last_ioadl = NULL;
6610         int len = qc->nbytes;
6611         struct scatterlist *sg;
6612         unsigned int si;
6613
6614         if (len == 0)
6615                 return;
6616
6617         if (qc->dma_dir == DMA_TO_DEVICE) {
6618                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6619                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6620                 ioarcb->data_transfer_length = cpu_to_be32(len);
6621                 ioarcb->ioadl_len =
6622                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6623         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6624                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6625                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6626                 ioarcb->read_ioadl_len =
6627                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6628         }
6629
6630         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6631                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6632                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6633
6634                 last_ioadl = ioadl;
6635                 ioadl++;
6636         }
6637
6638         if (likely(last_ioadl))
6639                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6640 }
6641
6642 /**
6643  * ipr_qc_defer - Get a free ipr_cmd
6644  * @qc: queued command
6645  *
6646  * Return value:
6647  *      0 if the qc can be issued / ATA_DEFER_LINK if it must be deferred
6648  **/
6649 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6650 {
6651         struct ata_port *ap = qc->ap;
6652         struct ipr_sata_port *sata_port = ap->private_data;
6653         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6654         struct ipr_cmnd *ipr_cmd;
6655         struct ipr_hrr_queue *hrrq;
6656         int hrrq_id;
6657
6658         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6659         hrrq = &ioa_cfg->hrrq[hrrq_id];
6660
6661         qc->lldd_task = NULL;
6662         spin_lock(&hrrq->_lock);
6663         if (unlikely(hrrq->ioa_is_dead)) {
6664                 spin_unlock(&hrrq->_lock);
6665                 return 0;
6666         }
6667
6668         if (unlikely(!hrrq->allow_cmds)) {
6669                 spin_unlock(&hrrq->_lock);
6670                 return ATA_DEFER_LINK;
6671         }
6672
6673         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6674         if (ipr_cmd == NULL) {
6675                 spin_unlock(&hrrq->_lock);
6676                 return ATA_DEFER_LINK;
6677         }
6678
6679         qc->lldd_task = ipr_cmd;
6680         spin_unlock(&hrrq->_lock);
6681         return 0;
6682 }
6683
6684 /**
6685  * ipr_qc_issue - Issue a SATA qc to a device
6686  * @qc: queued command
6687  *
6688  * Return value:
6689  *      0 if success / AC_ERR_* on failure
6690  **/
6691 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6692 {
6693         struct ata_port *ap = qc->ap;
6694         struct ipr_sata_port *sata_port = ap->private_data;
6695         struct ipr_resource_entry *res = sata_port->res;
6696         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6697         struct ipr_cmnd *ipr_cmd;
6698         struct ipr_ioarcb *ioarcb;
6699         struct ipr_ioarcb_ata_regs *regs;
6700
6701         if (qc->lldd_task == NULL)
6702                 ipr_qc_defer(qc);
6703
6704         ipr_cmd = qc->lldd_task;
6705         if (ipr_cmd == NULL)
6706                 return AC_ERR_SYSTEM;
6707
6708         qc->lldd_task = NULL;
6709         spin_lock(&ipr_cmd->hrrq->_lock);
6710         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6711                         ipr_cmd->hrrq->ioa_is_dead)) {
6712                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6713                 spin_unlock(&ipr_cmd->hrrq->_lock);
6714                 return AC_ERR_SYSTEM;
6715         }
6716
6717         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6718         ioarcb = &ipr_cmd->ioarcb;
6719
6720         if (ioa_cfg->sis64) {
6721                 regs = &ipr_cmd->i.ata_ioadl.regs;
6722                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6723         } else
6724                 regs = &ioarcb->u.add_data.u.regs;
6725
6726         memset(regs, 0, sizeof(*regs));
6727         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6728
6729         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6730         ipr_cmd->qc = qc;
6731         ipr_cmd->done = ipr_sata_done;
6732         ipr_cmd->ioarcb.res_handle = res->res_handle;
6733         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6734         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6735         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6736         ipr_cmd->dma_use_sg = qc->n_elem;
6737
6738         if (ioa_cfg->sis64)
6739                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6740         else
6741                 ipr_build_ata_ioadl(ipr_cmd, qc);
6742
6743         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6744         ipr_copy_sata_tf(regs, &qc->tf);
6745         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6746         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6747
6748         switch (qc->tf.protocol) {
6749         case ATA_PROT_NODATA:
6750         case ATA_PROT_PIO:
6751                 break;
6752
6753         case ATA_PROT_DMA:
6754                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6755                 break;
6756
6757         case ATAPI_PROT_PIO:
6758         case ATAPI_PROT_NODATA:
6759                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6760                 break;
6761
6762         case ATAPI_PROT_DMA:
6763                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6764                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6765                 break;
6766
6767         default:
6768                 WARN_ON(1);
6769                 spin_unlock(&ipr_cmd->hrrq->_lock);
6770                 return AC_ERR_INVALID;
6771         }
6772
6773         ipr_send_command(ipr_cmd);
6774         spin_unlock(&ipr_cmd->hrrq->_lock);
6775
6776         return 0;
6777 }
6778
6779 /**
6780  * ipr_qc_fill_rtf - Read result TF
6781  * @qc: ATA queued command
6782  *
6783  * Return value:
6784  *      true
6785  **/
6786 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6787 {
6788         struct ipr_sata_port *sata_port = qc->ap->private_data;
6789         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6790         struct ata_taskfile *tf = &qc->result_tf;
6791
6792         tf->feature = g->error;
6793         tf->nsect = g->nsect;
6794         tf->lbal = g->lbal;
6795         tf->lbam = g->lbam;
6796         tf->lbah = g->lbah;
6797         tf->device = g->device;
6798         tf->command = g->status;
6799         tf->hob_nsect = g->hob_nsect;
6800         tf->hob_lbal = g->hob_lbal;
6801         tf->hob_lbam = g->hob_lbam;
6802         tf->hob_lbah = g->hob_lbah;
6803
6804         return true;
6805 }
6806
6807 static struct ata_port_operations ipr_sata_ops = {
6808         .phy_reset = ipr_ata_phy_reset,
6809         .hardreset = ipr_sata_reset,
6810         .post_internal_cmd = ipr_ata_post_internal,
6811         .qc_prep = ata_noop_qc_prep,
6812         .qc_defer = ipr_qc_defer,
6813         .qc_issue = ipr_qc_issue,
6814         .qc_fill_rtf = ipr_qc_fill_rtf,
6815         .port_start = ata_sas_port_start,
6816         .port_stop = ata_sas_port_stop
6817 };
6818
6819 static struct ata_port_info sata_port_info = {
6820         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6821         .pio_mask       = ATA_PIO4_ONLY,
6822         .mwdma_mask     = ATA_MWDMA2,
6823         .udma_mask      = ATA_UDMA6,
6824         .port_ops       = &ipr_sata_ops
6825 };
6826
6827 #ifdef CONFIG_PPC_PSERIES
6828 static const u16 ipr_blocked_processors[] = {
6829         PVR_NORTHSTAR,
6830         PVR_PULSAR,
6831         PVR_POWER4,
6832         PVR_ICESTAR,
6833         PVR_SSTAR,
6834         PVR_POWER4p,
6835         PVR_630,
6836         PVR_630p
6837 };
6838
6839 /**
6840  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6841  * @ioa_cfg:    ioa cfg struct
6842  *
6843  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6844  * certain pSeries hardware. This function determines if the given
6845  * adapter is in one of these configurations or not.
6846  *
6847  * Return value:
6848  *      1 if adapter is not supported / 0 if adapter is supported
6849  **/
6850 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6851 {
6852         int i;
6853
6854         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6855                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6856                         if (pvr_version_is(ipr_blocked_processors[i]))
6857                                 return 1;
6858                 }
6859         }
6860         return 0;
6861 }
6862 #else
6863 #define ipr_invalid_adapter(ioa_cfg) 0
6864 #endif
6865
6866 /**
6867  * ipr_ioa_bringdown_done - IOA bring down completion.
6868  * @ipr_cmd:    ipr command struct
6869  *
6870  * This function processes the completion of an adapter bring down.
6871  * It wakes any reset sleepers.
6872  *
6873  * Return value:
6874  *      IPR_RC_JOB_RETURN
6875  **/
6876 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6877 {
6878         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6879         int i;
6880
6881         ENTER;
6882         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6883                 ipr_trace;
6884                 spin_unlock_irq(ioa_cfg->host->host_lock);
6885                 scsi_unblock_requests(ioa_cfg->host);
6886                 spin_lock_irq(ioa_cfg->host->host_lock);
6887         }
6888
6889         ioa_cfg->in_reset_reload = 0;
6890         ioa_cfg->reset_retries = 0;
6891         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6892                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6893                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6894                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6895         }
6896         wmb();
6897
6898         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6899         wake_up_all(&ioa_cfg->reset_wait_q);
6900         LEAVE;
6901
6902         return IPR_RC_JOB_RETURN;
6903 }
6904
6905 /**
6906  * ipr_ioa_reset_done - IOA reset completion.
6907  * @ipr_cmd:    ipr command struct
6908  *
6909  * This function processes the completion of an adapter reset.
6910  * It schedules any necessary mid-layer add/removes and
6911  * wakes any reset sleepers.
6912  *
6913  * Return value:
6914  *      IPR_RC_JOB_RETURN
6915  **/
6916 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6917 {
6918         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6919         struct ipr_resource_entry *res;
6920         struct ipr_hostrcb *hostrcb, *temp;
6921         int i = 0, j;
6922
6923         ENTER;
6924         ioa_cfg->in_reset_reload = 0;
6925         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6926                 spin_lock(&ioa_cfg->hrrq[j]._lock);
6927                 ioa_cfg->hrrq[j].allow_cmds = 1;
6928                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6929         }
6930         wmb();
6931         ioa_cfg->reset_cmd = NULL;
6932         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6933
6934         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6935                 if (res->add_to_ml || res->del_from_ml) {
6936                         ipr_trace;
6937                         break;
6938                 }
6939         }
6940         schedule_work(&ioa_cfg->work_q);
6941
6942         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6943                 list_del(&hostrcb->queue);
6944                 if (i++ < IPR_NUM_LOG_HCAMS)
6945                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6946                 else
6947                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6948         }
6949
6950         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6951         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6952
6953         ioa_cfg->reset_retries = 0;
6954         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6955         wake_up_all(&ioa_cfg->reset_wait_q);
6956
6957         spin_unlock(ioa_cfg->host->host_lock);
6958         scsi_unblock_requests(ioa_cfg->host);
6959         spin_lock(ioa_cfg->host->host_lock);
6960
6961         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6962                 scsi_block_requests(ioa_cfg->host);
6963
6964         schedule_work(&ioa_cfg->work_q);
6965         LEAVE;
6966         return IPR_RC_JOB_RETURN;
6967 }
6968
6969 /**
6970  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6971  * @supported_dev:      supported device struct
6972  * @vpids:                      vendor product id struct
6973  *
6974  * Return value:
6975  *      none
6976  **/
6977 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6978                                  struct ipr_std_inq_vpids *vpids)
6979 {
6980         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6981         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6982         supported_dev->num_records = 1;
6983         supported_dev->data_length =
6984                 cpu_to_be16(sizeof(struct ipr_supported_device));
6985         supported_dev->reserved = 0;
6986 }
6987
6988 /**
6989  * ipr_set_supported_devs - Send Set Supported Devices for a device
6990  * @ipr_cmd:    ipr command struct
6991  *
6992  * This function sends a Set Supported Devices to the adapter
6993  *
6994  * Return value:
6995  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6996  **/
6997 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6998 {
6999         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7000         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7001         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7002         struct ipr_resource_entry *res = ipr_cmd->u.res;
7003
7004         ipr_cmd->job_step = ipr_ioa_reset_done;
7005
7006         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7007                 if (!ipr_is_scsi_disk(res))
7008                         continue;
7009
7010                 ipr_cmd->u.res = res;
7011                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7012
7013                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7014                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7015                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7016
7017                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7018                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7019                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7020                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7021
7022                 ipr_init_ioadl(ipr_cmd,
7023                                ioa_cfg->vpd_cbs_dma +
7024                                  offsetof(struct ipr_misc_cbs, supp_dev),
7025                                sizeof(struct ipr_supported_device),
7026                                IPR_IOADL_FLAGS_WRITE_LAST);
7027
7028                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7029                            IPR_SET_SUP_DEVICE_TIMEOUT);
7030
7031                 if (!ioa_cfg->sis64)
7032                         ipr_cmd->job_step = ipr_set_supported_devs;
7033                 LEAVE;
7034                 return IPR_RC_JOB_RETURN;
7035         }
7036
7037         LEAVE;
7038         return IPR_RC_JOB_CONTINUE;
7039 }
7040
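/*
 * Editor's sketch of the CDB length encoding used above (hypothetical
 * size value, not taken from ipr.h): a 16-bit allocation length is
 * split big-endian across cdb[7] and cdb[8]. For a length of 0x0128:
 *
 *      cdb[7] = (0x0128 >> 8) & 0xff;          results in 0x01 (MSB)
 *      cdb[8] = 0x0128 & 0xff;                 results in 0x28 (LSB)
 *
 * The adapter reassembles the length as (cdb[7] << 8) | cdb[8].
 */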
7041 /**
7042  * ipr_get_mode_page - Locate specified mode page
7043  * @mode_pages: mode page buffer
7044  * @page_code:  page code to find
7045  * @len:                minimum required length for mode page
7046  *
7047  * Return value:
7048  *      pointer to mode page / NULL on failure
7049  **/
7050 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7051                                u32 page_code, u32 len)
7052 {
7053         struct ipr_mode_page_hdr *mode_hdr;
7054         u32 page_length;
7055         u32 length;
7056
7057         if (!mode_pages || (mode_pages->hdr.length == 0))
7058                 return NULL;
7059
7060         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7061         mode_hdr = (struct ipr_mode_page_hdr *)
7062                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7063
7064         while (length) {
7065                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7066                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7067                                 return mode_hdr;
7068                         break;
7069                 } else {
7070                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7071                                        mode_hdr->page_length);
7072                         length -= page_length;
7073                         mode_hdr = (struct ipr_mode_page_hdr *)
7074                                 ((unsigned long)mode_hdr + page_length);
7075                 }
7076         }
7077         return NULL;
7078 }
7079
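/*
 * Worked example of the length arithmetic in ipr_get_mode_page()
 * (editor's sketch with made-up numbers): for a MODE SENSE(6) reply
 * with hdr.length = 0x23 and an 8-byte block descriptor, the mode
 * pages occupy
 *
 *      (0x23 + 1) - 4 - 8 = 0x18 bytes
 *
 * since hdr.length counts every byte after the length byte itself and
 * the 4-byte parameter header plus block descriptors precede the first
 * page. The walk then advances page by page in steps of
 * sizeof(struct ipr_mode_page_hdr) + page_length.
 */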
7080 /**
7081  * ipr_check_term_power - Check for term power errors
7082  * @ioa_cfg:    ioa config struct
7083  * @mode_pages: IOAFP mode pages buffer
7084  *
7085  * Check the IOAFP's mode page 28 for term power errors
7086  *
7087  * Return value:
7088  *      nothing
7089  **/
7090 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7091                                  struct ipr_mode_pages *mode_pages)
7092 {
7093         int i;
7094         int entry_length;
7095         struct ipr_dev_bus_entry *bus;
7096         struct ipr_mode_page28 *mode_page;
7097
7098         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7099                                       sizeof(struct ipr_mode_page28));
7100
7101         entry_length = mode_page->entry_length;
7102
7103         bus = mode_page->bus;
7104
7105         for (i = 0; i < mode_page->num_entries; i++) {
7106                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7107                         dev_err(&ioa_cfg->pdev->dev,
7108                                 "Term power is absent on scsi bus %d\n",
7109                                 bus->res_addr.bus);
7110                 }
7111
7112                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7113         }
7114 }
7115
7116 /**
7117  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7118  * @ioa_cfg:    ioa config struct
7119  *
7120  * Looks through the config table checking for SES devices. If an
7121  * SES device is found in the SES table with a maximum SCSI bus
7122  * speed, the speed of that bus is limited accordingly.
7123  *
7124  * Return value:
7125  *      none
7126  **/
7127 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7128 {
7129         u32 max_xfer_rate;
7130         int i;
7131
7132         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7133                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7134                                                        ioa_cfg->bus_attr[i].bus_width);
7135
7136                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7137                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7138         }
7139 }
7140
7141 /**
7142  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7143  * @ioa_cfg:    ioa config struct
7144  * @mode_pages: mode page 28 buffer
7145  *
7146  * Updates mode page 28 based on driver configuration
7147  *
7148  * Return value:
7149  *      none
7150  **/
7151 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7152                                           struct ipr_mode_pages *mode_pages)
7153 {
7154         int i, entry_length;
7155         struct ipr_dev_bus_entry *bus;
7156         struct ipr_bus_attributes *bus_attr;
7157         struct ipr_mode_page28 *mode_page;
7158
7159         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7160                                       sizeof(struct ipr_mode_page28));
7161
7162         entry_length = mode_page->entry_length;
7163
7164         /* Loop for each device bus entry */
7165         for (i = 0, bus = mode_page->bus;
7166              i < mode_page->num_entries;
7167              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7168                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7169                         dev_err(&ioa_cfg->pdev->dev,
7170                                 "Invalid resource address reported: 0x%08X\n",
7171                                 IPR_GET_PHYS_LOC(bus->res_addr));
7172                         continue;
7173                 }
7174
7175                 bus_attr = &ioa_cfg->bus_attr[i];
7176                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7177                 bus->bus_width = bus_attr->bus_width;
7178                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7179                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7180                 if (bus_attr->qas_enabled)
7181                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7182                 else
7183                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7184         }
7185 }
7186
7187 /**
7188  * ipr_build_mode_select - Build a mode select command
7189  * @ipr_cmd:    ipr command struct
7190  * @res_handle: resource handle to send command to
7191  * @parm:               byte 1 (cdb[1]) of the Mode Select command
7192  * @dma_addr:   DMA buffer address
7193  * @xfer_len:   data transfer length
7194  *
7195  * Return value:
7196  *      none
7197  **/
7198 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7199                                   __be32 res_handle, u8 parm,
7200                                   dma_addr_t dma_addr, u8 xfer_len)
7201 {
7202         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7203
7204         ioarcb->res_handle = res_handle;
7205         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7206         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7207         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7208         ioarcb->cmd_pkt.cdb[1] = parm;
7209         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7210
7211         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7212 }
7213
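/*
 * Editor's annotation of the resulting MODE SELECT(6) CDB. The
 * parameter list length travels in byte 4, which is why xfer_len is a
 * u8 (capped at 255). With parm = 0x11 as passed by the callers:
 *
 *      cdb[0] = 0x15           MODE_SELECT
 *      cdb[1] = 0x11           PF | SP (page format, save pages)
 *      cdb[4] = xfer_len       parameter list length
 */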
7214 /**
7215  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7216  * @ipr_cmd:    ipr command struct
7217  *
7218  * This function sets up the SCSI bus attributes and sends
7219  * a Mode Select for Page 28 to activate them.
7220  *
7221  * Return value:
7222  *      IPR_RC_JOB_RETURN
7223  **/
7224 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7225 {
7226         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7227         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7228         int length;
7229
7230         ENTER;
7231         ipr_scsi_bus_speed_limit(ioa_cfg);
7232         ipr_check_term_power(ioa_cfg, mode_pages);
7233         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
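        /*
         * Editor's note: the mode data length field is reserved (zero)
         * in a MODE SELECT parameter list, so the length learned from
         * the earlier MODE SENSE is captured first and the header
         * field is cleared before the buffer is sent back.
         */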
7234         length = mode_pages->hdr.length + 1;
7235         mode_pages->hdr.length = 0;
7236
7237         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7238                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7239                               length);
7240
7241         ipr_cmd->job_step = ipr_set_supported_devs;
7242         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7243                                     struct ipr_resource_entry, queue);
7244         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7245
7246         LEAVE;
7247         return IPR_RC_JOB_RETURN;
7248 }
7249
7250 /**
7251  * ipr_build_mode_sense - Builds a mode sense command
7252  * @ipr_cmd:    ipr command struct
7253  * @res_handle: resource handle to send command to
7254  * @parm:               Byte 2 of mode sense command
7255  * @dma_addr:   DMA address of mode sense buffer
7256  * @xfer_len:   Size of DMA buffer
7257  *
7258  * Return value:
7259  *      none
7260  **/
7261 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7262                                  __be32 res_handle,
7263                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7264 {
7265         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7266
7267         ioarcb->res_handle = res_handle;
7268         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7269         ioarcb->cmd_pkt.cdb[2] = parm;
7270         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7271         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7272
7273         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7274 }
7275
7276 /**
7277  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7278  * @ipr_cmd:    ipr command struct
7279  *
7280  * This function handles the failure of an IOA bringup command.
7281  *
7282  * Return value:
7283  *      IPR_RC_JOB_RETURN
7284  **/
7285 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7286 {
7287         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7288         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7289
7290         dev_err(&ioa_cfg->pdev->dev,
7291                 "0x%02X failed with IOASC: 0x%08X\n",
7292                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7293
7294         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7295         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7296         return IPR_RC_JOB_RETURN;
7297 }
7298
7299 /**
7300  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7301  * @ipr_cmd:    ipr command struct
7302  *
7303  * This function handles the failure of a Mode Sense to the IOAFP.
7304  * Some adapters do not handle all mode pages.
7305  *
7306  * Return value:
7307  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7308  **/
7309 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7310 {
7311         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7312         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7313
7314         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7315                 ipr_cmd->job_step = ipr_set_supported_devs;
7316                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7317                                             struct ipr_resource_entry, queue);
7318                 return IPR_RC_JOB_CONTINUE;
7319         }
7320
7321         return ipr_reset_cmd_failed(ipr_cmd);
7322 }
7323
7324 /**
7325  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7326  * @ipr_cmd:    ipr command struct
7327  *
7328  * This function sends a Page 28 mode sense to the IOA to
7329  * retrieve SCSI bus attributes.
7330  *
7331  * Return value:
7332  *      IPR_RC_JOB_RETURN
7333  **/
7334 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7335 {
7336         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7337
7338         ENTER;
7339         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7340                              0x28, ioa_cfg->vpd_cbs_dma +
7341                              offsetof(struct ipr_misc_cbs, mode_pages),
7342                              sizeof(struct ipr_mode_pages));
7343
7344         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7345         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7346
7347         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7348
7349         LEAVE;
7350         return IPR_RC_JOB_RETURN;
7351 }
7352
7353 /**
7354  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7355  * @ipr_cmd:    ipr command struct
7356  *
7357  * This function enables dual IOA RAID support if possible.
7358  *
7359  * Return value:
7360  *      IPR_RC_JOB_RETURN
7361  **/
7362 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7363 {
7364         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7365         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7366         struct ipr_mode_page24 *mode_page;
7367         int length;
7368
7369         ENTER;
7370         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7371                                       sizeof(struct ipr_mode_page24));
7372
7373         if (mode_page)
7374                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7375
7376         length = mode_pages->hdr.length + 1;
7377         mode_pages->hdr.length = 0;
7378
7379         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7380                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7381                               length);
7382
7383         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7384         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7385
7386         LEAVE;
7387         return IPR_RC_JOB_RETURN;
7388 }
7389
7390 /**
7391  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7392  * @ipr_cmd:    ipr command struct
7393  *
7394  * This function handles the failure of a Mode Sense to the IOAFP.
7395  * Some adapters do not handle all mode pages.
7396  *
7397  * Return value:
7398  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7399  **/
7400 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7401 {
7402         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7403
7404         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7405                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7406                 return IPR_RC_JOB_CONTINUE;
7407         }
7408
7409         return ipr_reset_cmd_failed(ipr_cmd);
7410 }
7411
7412 /**
7413  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7414  * @ipr_cmd:    ipr command struct
7415  *
7416  * This function sends a mode sense to the IOA to retrieve
7417  * the IOA Advanced Function Control mode page.
7418  *
7419  * Return value:
7420  *      IPR_RC_JOB_RETURN
7421  **/
7422 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7423 {
7424         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7425
7426         ENTER;
7427         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7428                              0x24, ioa_cfg->vpd_cbs_dma +
7429                              offsetof(struct ipr_misc_cbs, mode_pages),
7430                              sizeof(struct ipr_mode_pages));
7431
7432         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7433         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7434
7435         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7436
7437         LEAVE;
7438         return IPR_RC_JOB_RETURN;
7439 }
7440
7441 /**
7442  * ipr_init_res_table - Initialize the resource table
7443  * @ipr_cmd:    ipr command struct
7444  *
7445  * This function looks through the existing resource table, comparing
7446  * it with the config table. This function will take care of old/new
7447  * devices and schedule adding/removing them from the mid-layer
7448  * as appropriate.
7449  *
7450  * Return value:
7451  *      IPR_RC_JOB_CONTINUE
7452  **/
7453 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7454 {
7455         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7456         struct ipr_resource_entry *res, *temp;
7457         struct ipr_config_table_entry_wrapper cfgtew;
7458         int entries, found, flag, i;
7459         LIST_HEAD(old_res);
7460
7461         ENTER;
7462         if (ioa_cfg->sis64)
7463                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7464         else
7465                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7466
7467         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7468                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7469
7470         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7471                 list_move_tail(&res->queue, &old_res);
7472
7473         if (ioa_cfg->sis64)
7474                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7475         else
7476                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7477
7478         for (i = 0; i < entries; i++) {
7479                 if (ioa_cfg->sis64)
7480                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7481                 else
7482                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7483                 found = 0;
7484
7485                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7486                         if (ipr_is_same_device(res, &cfgtew)) {
7487                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7488                                 found = 1;
7489                                 break;
7490                         }
7491                 }
7492
7493                 if (!found) {
7494                         if (list_empty(&ioa_cfg->free_res_q)) {
7495                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7496                                 break;
7497                         }
7498
7499                         found = 1;
7500                         res = list_entry(ioa_cfg->free_res_q.next,
7501                                          struct ipr_resource_entry, queue);
7502                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7503                         ipr_init_res_entry(res, &cfgtew);
7504                         res->add_to_ml = 1;
7505                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7506                         res->sdev->allow_restart = 1;
7507
7508                 if (found)
7509                         ipr_update_res_entry(res, &cfgtew);
7510         }
7511
7512         list_for_each_entry_safe(res, temp, &old_res, queue) {
7513                 if (res->sdev) {
7514                         res->del_from_ml = 1;
7515                         res->res_handle = IPR_INVALID_RES_HANDLE;
7516                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7517                 }
7518         }
7519
7520         list_for_each_entry_safe(res, temp, &old_res, queue) {
7521                 ipr_clear_res_target(res);
7522                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7523         }
7524
7525         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7526                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7527         else
7528                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7529
7530         LEAVE;
7531         return IPR_RC_JOB_CONTINUE;
7532 }
7533
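/*
 * Editor's sketch of the reconciliation pattern used by
 * ipr_init_res_table() above (pseudo-code, not driver code):
 *
 *      move every entry from used_res_q onto old_res;
 *      for each device in the freshly fetched config table:
 *              if it matches an entry on old_res:
 *                      move that entry back to used_res_q (unchanged);
 *              else:
 *                      grab a free entry, init it, set add_to_ml (new);
 *      entries left on old_res that still have an sdev:
 *              set del_from_ml so the worker removes them;
 *      everything else on old_res:
 *              clear the target and return it to free_res_q;
 */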
7534 /**
7535  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7536  * @ipr_cmd:    ipr command struct
7537  *
7538  * This function sends a Query IOA Configuration command
7539  * to the adapter to retrieve the IOA configuration table.
7540  *
7541  * Return value:
7542  *      IPR_RC_JOB_RETURN
7543  **/
7544 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7545 {
7546         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7547         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7548         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7549         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7550
7551         ENTER;
7552         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7553                 ioa_cfg->dual_raid = 1;
7554         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7555                  ucode_vpd->major_release, ucode_vpd->card_type,
7556                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7557         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7558         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7559
7560         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7561         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7562         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7563         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7564
7565         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7566                        IPR_IOADL_FLAGS_READ_LAST);
7567
7568         ipr_cmd->job_step = ipr_init_res_table;
7569
7570         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7571
7572         LEAVE;
7573         return IPR_RC_JOB_RETURN;
7574 }
7575
7576 /**
7577  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7578  * @ipr_cmd:    ipr command struct
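 * @flags:      inquiry CDB flags, placed in cdb[1] (e.g. EVPD)
 * @page:       inquiry page code
 * @dma_addr:   DMA address of the inquiry buffer
 * @xfer_len:   inquiry buffer length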
7579  *
7580  * This utility function sends an inquiry to the adapter.
7581  *
7582  * Return value:
7583  *      none
7584  **/
7585 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7586                               dma_addr_t dma_addr, u8 xfer_len)
7587 {
7588         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7589
7590         ENTER;
7591         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7592         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7593
7594         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7595         ioarcb->cmd_pkt.cdb[1] = flags;
7596         ioarcb->cmd_pkt.cdb[2] = page;
7597         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7598
7599         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7600
7601         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7602         LEAVE;
7603 }
7604
7605 /**
7606  * ipr_inquiry_page_supported - Is the given inquiry page supported
7607  * @page0:              inquiry page 0 buffer
7608  * @page:               page code.
7609  *
7610  * This function determines if the specified inquiry page is supported.
7611  *
7612  * Return value:
7613  *      1 if page is supported / 0 if not
7614  **/
7615 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7616 {
7617         int i;
7618
7619         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7620                 if (page0->page[i] == page)
7621                         return 1;
7622
7623         return 0;
7624 }
7625
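/*
 * Editor's note: VPD page 0x00 lists the supported page codes as a
 * byte array, so a (hypothetical) reply of { 0x00, 0x03, 0xC9, 0xD0 }
 * would make ipr_inquiry_page_supported(page0, 0xD0) return 1. The
 * min_t() clamp guards against a device-reported length larger than
 * the driver's IPR_INQUIRY_PAGE0_ENTRIES buffer.
 */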
7626 /**
7627  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7628  * @ipr_cmd:    ipr command struct
7629  *
7630  * This function sends a Page 0xD0 inquiry to the adapter
7631  * to retrieve adapter capabilities.
7632  *
7633  * Return value:
7634  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7635  **/
7636 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7637 {
7638         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7639         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7640         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7641
7642         ENTER;
7643         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7644         memset(cap, 0, sizeof(*cap));
7645
7646         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7647                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7648                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7649                                   sizeof(struct ipr_inquiry_cap));
7650                 return IPR_RC_JOB_RETURN;
7651         }
7652
7653         LEAVE;
7654         return IPR_RC_JOB_CONTINUE;
7655 }
7656
7657 /**
7658  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7659  * @ipr_cmd:    ipr command struct
7660  *
7661  * This function sends a Page 3 inquiry to the adapter
7662  * to retrieve software VPD information.
7663  *
7664  * Return value:
7665  *      IPR_RC_JOB_RETURN
7666  **/
7667 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7668 {
7669         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7670
7671         ENTER;
7672
7673         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7674
7675         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7676                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7677                           sizeof(struct ipr_inquiry_page3));
7678
7679         LEAVE;
7680         return IPR_RC_JOB_RETURN;
7681 }
7682
7683 /**
7684  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7685  * @ipr_cmd:    ipr command struct
7686  *
7687  * This function sends a Page 0 inquiry to the adapter
7688  * to retrieve supported inquiry pages.
7689  *
7690  * Return value:
7691  *      IPR_RC_JOB_RETURN
7692  **/
7693 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7694 {
7695         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7696         char type[5];
7697
7698         ENTER;
7699
7700         /* Grab the type out of the VPD and store it away */
7701         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7702         type[4] = '\0';
7703         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7704
7705         if (ipr_invalid_adapter(ioa_cfg)) {
7706                 dev_err(&ioa_cfg->pdev->dev,
7707                         "Adapter not supported in this hardware configuration.\n");
7708
7709                 if (!ipr_testmode) {
7710                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7711                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7712                         list_add_tail(&ipr_cmd->queue,
7713                                         &ioa_cfg->hrrq->hrrq_free_q);
7714                         return IPR_RC_JOB_RETURN;
7715                 }
7716         }
7717
7718         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7719
7720         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7721                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7722                           sizeof(struct ipr_inquiry_page0));
7723
7724         LEAVE;
7725         return IPR_RC_JOB_RETURN;
7726 }
7727
7728 /**
7729  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7730  * @ipr_cmd:    ipr command struct
7731  *
7732  * This function sends a standard inquiry to the adapter.
7733  *
7734  * Return value:
7735  *      IPR_RC_JOB_RETURN
7736  **/
7737 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7738 {
7739         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7740
7741         ENTER;
7742         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7743
7744         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7745                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7746                           sizeof(struct ipr_ioa_vpd));
7747
7748         LEAVE;
7749         return IPR_RC_JOB_RETURN;
7750 }
7751
7752 /**
7753  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7754  * @ipr_cmd:    ipr command struct
7755  *
7756  * This function sends an Identify Host Request Response Queue
7757  * command to establish the HRRQ with the adapter.
7758  *
7759  * Return value:
7760  *      IPR_RC_JOB_RETURN
7761  **/
7762 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7763 {
7764         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7765         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7766         struct ipr_hrr_queue *hrrq;
7767
7768         ENTER;
7769         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7770         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7771
7772         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7773                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7774
7775                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7776                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7777
7778                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7779                 if (ioa_cfg->sis64)
7780                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7781
7782                 if (ioa_cfg->nvectors == 1)
7783                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7784                 else
7785                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7786
7787                 ioarcb->cmd_pkt.cdb[2] =
7788                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7789                 ioarcb->cmd_pkt.cdb[3] =
7790                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7791                 ioarcb->cmd_pkt.cdb[4] =
7792                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7793                 ioarcb->cmd_pkt.cdb[5] =
7794                         ((u64) hrrq->host_rrq_dma) & 0xff;
7795                 ioarcb->cmd_pkt.cdb[7] =
7796                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7797                 ioarcb->cmd_pkt.cdb[8] =
7798                         (sizeof(u32) * hrrq->size) & 0xff;
7799
7800                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7801                         ioarcb->cmd_pkt.cdb[9] =
7802                                         ioa_cfg->identify_hrrq_index;
7803
7804                 if (ioa_cfg->sis64) {
7805                         ioarcb->cmd_pkt.cdb[10] =
7806                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7807                         ioarcb->cmd_pkt.cdb[11] =
7808                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7809                         ioarcb->cmd_pkt.cdb[12] =
7810                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7811                         ioarcb->cmd_pkt.cdb[13] =
7812                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7813                 }
7814
7815                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7816                         ioarcb->cmd_pkt.cdb[14] =
7817                                         ioa_cfg->identify_hrrq_index;
7818
7819                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7820                            IPR_INTERNAL_TIMEOUT);
7821
7822                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7823                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7824
7825                 LEAVE;
7826                 return IPR_RC_JOB_RETURN;
7827         }
7828
7829         LEAVE;
7830         return IPR_RC_JOB_CONTINUE;
7831 }
7832
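/*
 * Worked example of the HRRQ address encoding above (editor's sketch,
 * hypothetical address): for host_rrq_dma = 0x0000001234567800 the
 * Identify Host RRQ CDB carries
 *
 *      cdb[2..5]   = 0x34 0x56 0x78 0x00   low 32 bits, MSB first
 *      cdb[10..13] = 0x00 0x00 0x00 0x12   high 32 bits (sis64 only)
 *
 * while cdb[7..8] hold sizeof(u32) * hrrq->size, the byte length of
 * the response queue, again MSB first.
 */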
7833 /**
7834  * ipr_reset_timer_done - Adapter reset timer function
7835  * @ipr_cmd:    ipr command struct
7836  *
7837  * Description: This function is used in adapter reset processing
7838  * for timing events. If the reset_cmd pointer in the IOA
7839  * config struct is not this adapter's, we are doing nested
7840  * resets and fail_all_ops will take care of freeing the
7841  * command block.
7842  *
7843  * Return value:
7844  *      none
7845  **/
7846 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7847 {
7848         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7849         unsigned long lock_flags = 0;
7850
7851         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7852
7853         if (ioa_cfg->reset_cmd == ipr_cmd) {
7854                 list_del(&ipr_cmd->queue);
7855                 ipr_cmd->done(ipr_cmd);
7856         }
7857
7858         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7859 }
7860
7861 /**
7862  * ipr_reset_start_timer - Start a timer for adapter reset job
7863  * @ipr_cmd:    ipr command struct
7864  * @timeout:    timeout value
7865  *
7866  * Description: This function is used in adapter reset processing
7867  * for timing events. If the reset_cmd pointer in the IOA
7868  * config struct is not this adapter's, we are doing nested
7869  * resets and fail_all_ops will take care of freeing the
7870  * command block.
7871  *
7872  * Return value:
7873  *      none
7874  **/
7875 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7876                                   unsigned long timeout)
7877 {
7878
7879         ENTER;
7880         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7881         ipr_cmd->done = ipr_reset_ioa_job;
7882
7883         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7884         ipr_cmd->timer.expires = jiffies + timeout;
7885         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7886         add_timer(&ipr_cmd->timer);
7887 }
7888
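/*
 * Editor's note: the timer.data / timer.function assignments and the
 * (void (*)(unsigned long)) cast above are the pre-4.15 kernel timer
 * API. On kernels providing timer_setup() the equivalent would look
 * roughly like this (untested sketch; ipr_reset_timer_done_tl is a
 * hypothetical wrapper, not a driver symbol):
 *
 *      static void ipr_reset_timer_done_tl(struct timer_list *t)
 *      {
 *              struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
 *
 *              ipr_reset_timer_done(ipr_cmd);
 *      }
 *
 *      timer_setup(&ipr_cmd->timer, ipr_reset_timer_done_tl, 0);
 *      ipr_cmd->timer.expires = jiffies + timeout;
 *      add_timer(&ipr_cmd->timer);
 */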
7889 /**
7890  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7891  * @ioa_cfg:    ioa cfg struct
7892  *
7893  * Return value:
7894  *      nothing
7895  **/
7896 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7897 {
7898         struct ipr_hrr_queue *hrrq;
7899
7900         for_each_hrrq(hrrq, ioa_cfg) {
7901                 spin_lock(&hrrq->_lock);
7902                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7903
7904                 /* Initialize Host RRQ pointers */
7905                 hrrq->hrrq_start = hrrq->host_rrq;
7906                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7907                 hrrq->hrrq_curr = hrrq->hrrq_start;
7908                 hrrq->toggle_bit = 1;
7909                 spin_unlock(&hrrq->_lock);
7910         }
7911         wmb();
7912
7913         ioa_cfg->identify_hrrq_index = 0;
7914         if (ioa_cfg->hrrq_num == 1)
7915                 atomic_set(&ioa_cfg->hrrq_index, 0);
7916         else
7917                 atomic_set(&ioa_cfg->hrrq_index, 1);
7918
7919         /* Zero out config table */
7920         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7921 }
7922
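/*
 * Editor's note on toggle_bit: each response the adapter writes into
 * the host RRQ carries a toggle bit that flips on every pass around
 * the ring. The ISR treats an entry as new only while its toggle bit
 * matches hrrq->toggle_bit, and inverts the expected value whenever
 * hrrq_curr wraps from hrrq_end back to hrrq_start, so producer and
 * consumer can share the ring without an explicit fill count.
 */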
7923 /**
7924  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7925  * @ipr_cmd:    ipr command struct
7926  *
7927  * Return value:
7928  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7929  **/
7930 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7931 {
7932         unsigned long stage, stage_time;
7933         u32 feedback;
7934         volatile u32 int_reg;
7935         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7936         u64 maskval = 0;
7937
7938         feedback = readl(ioa_cfg->regs.init_feedback_reg);
7939         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7940         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7941
7942         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7943
7944         /* sanity check the stage_time value */
7945         if (stage_time == 0)
7946                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7947         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7948                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7949         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7950                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7951
7952         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7953                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7954                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7955                 stage_time = ioa_cfg->transop_timeout;
7956                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7957         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7958                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7959                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7960                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7961                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
7962                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7963                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7964                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7965                         return IPR_RC_JOB_CONTINUE;
7966                 }
7967         }
7968
7969         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7970         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7971         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7972         ipr_cmd->done = ipr_reset_ioa_job;
7973         add_timer(&ipr_cmd->timer);
7974
7975         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7976
7977         return IPR_RC_JOB_RETURN;
7978 }
7979
7980 /**
7981  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7982  * @ipr_cmd:    ipr command struct
7983  *
7984  * This function reinitializes some control blocks and
7985  * enables destructive diagnostics on the adapter.
7986  *
7987  * Return value:
7988  *      IPR_RC_JOB_RETURN
7989  **/
7990 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7991 {
7992         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7993         volatile u32 int_reg;
7994         volatile u64 maskval;
7995         int i;
7996
7997         ENTER;
7998         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7999         ipr_init_ioa_mem(ioa_cfg);
8000
8001         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8002                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8003                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8004                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8005         }
8006         wmb();
8007         if (ioa_cfg->sis64) {
8008                 /* Set the adapter to the correct endian mode. */
8009                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8010                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8011         }
8012
8013         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8014
8015         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8016                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8017                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8018                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8019                 return IPR_RC_JOB_CONTINUE;
8020         }
8021
8022         /* Enable destructive diagnostics on IOA */
8023         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8024
8025         if (ioa_cfg->sis64) {
8026                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8027                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8028                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8029         } else
8030                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8031
8032         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8033
8034         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8035
8036         if (ioa_cfg->sis64) {
8037                 ipr_cmd->job_step = ipr_reset_next_stage;
8038                 return IPR_RC_JOB_CONTINUE;
8039         }
8040
8041         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8042         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8043         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8044         ipr_cmd->done = ipr_reset_ioa_job;
8045         add_timer(&ipr_cmd->timer);
8046         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8047
8048         LEAVE;
8049         return IPR_RC_JOB_RETURN;
8050 }
8051
8052 /**
8053  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8054  * @ipr_cmd:    ipr command struct
8055  *
8056  * This function is invoked when an adapter dump has run out
8057  * of processing time.
8058  *
8059  * Return value:
8060  *      IPR_RC_JOB_CONTINUE
8061  **/
8062 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8063 {
8064         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8065
8066         if (ioa_cfg->sdt_state == GET_DUMP)
8067                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8068         else if (ioa_cfg->sdt_state == READ_DUMP)
8069                 ioa_cfg->sdt_state = ABORT_DUMP;
8070
8071         ioa_cfg->dump_timeout = 1;
8072         ipr_cmd->job_step = ipr_reset_alert;
8073
8074         return IPR_RC_JOB_CONTINUE;
8075 }
8076
8077 /**
8078  * ipr_unit_check_no_data - Log a unit check with no data
8079  * @ioa_cfg:            ioa config struct
8080  *
8081  * Logs an error indicating the adapter unit checked, but for some
8082  * reason, we were unable to fetch the unit check buffer.
8083  *
8084  * Return value:
8085  *      nothing
8086  **/
8087 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8088 {
8089         ioa_cfg->errors_logged++;
8090         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8091 }
8092
8093 /**
8094  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8095  * @ioa_cfg:            ioa config struct
8096  *
8097  * Fetches the unit check buffer from the adapter by clocking the data
8098  * through the mailbox register.
8099  *
8100  * Return value:
8101  *      nothing
8102  **/
8103 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8104 {
8105         unsigned long mailbox;
8106         struct ipr_hostrcb *hostrcb;
8107         struct ipr_uc_sdt sdt;
8108         int rc, length;
8109         u32 ioasc;
8110
8111         mailbox = readl(ioa_cfg->ioa_mailbox);
8112
8113         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8114                 ipr_unit_check_no_data(ioa_cfg);
8115                 return;
8116         }
8117
8118         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8119         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8120                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8121
8122         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8123             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8124             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8125                 ipr_unit_check_no_data(ioa_cfg);
8126                 return;
8127         }
8128
8129         /* Find length of the first sdt entry (UC buffer) */
8130         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8131                 length = be32_to_cpu(sdt.entry[0].end_token);
8132         else
8133                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8134                           be32_to_cpu(sdt.entry[0].start_token)) &
8135                           IPR_FMT2_MBX_ADDR_MASK;
8136
8137         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8138                              struct ipr_hostrcb, queue);
8139         list_del(&hostrcb->queue);
8140         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8141
8142         rc = ipr_get_ldump_data_section(ioa_cfg,
8143                                         be32_to_cpu(sdt.entry[0].start_token),
8144                                         (__be32 *)&hostrcb->hcam,
8145                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8146
8147         if (!rc) {
8148                 ipr_handle_log_data(ioa_cfg, hostrcb);
8149                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8150                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8151                     ioa_cfg->sdt_state == GET_DUMP)
8152                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8153         } else
8154                 ipr_unit_check_no_data(ioa_cfg);
8155
8156         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8157 }
8158
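/*
 * Worked example of the length computation above (editor's sketch,
 * hypothetical tokens): a fmt2 SDT entry with start_token = 0x00040000
 * and end_token = 0x00041230 yields
 *
 *      (0x00041230 - 0x00040000) & IPR_FMT2_MBX_ADDR_MASK = 0x1230
 *
 * bytes, whereas a fmt3 SDT reports the length directly in end_token.
 * The copy is then clamped to sizeof(hostrcb->hcam) before the data is
 * clocked through the mailbox.
 */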
8159 /**
8160  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8161  * @ipr_cmd:    ipr command struct
8162  *
8163  * Description: This function fetches the unit check buffer from the adapter.
8164  *
8165  * Return value:
8166  *      IPR_RC_JOB_RETURN
8167  **/
8168 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8169 {
8170         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8171
8172         ENTER;
8173         ioa_cfg->ioa_unit_checked = 0;
8174         ipr_get_unit_check_buffer(ioa_cfg);
8175         ipr_cmd->job_step = ipr_reset_alert;
8176         ipr_reset_start_timer(ipr_cmd, 0);
8177
8178         LEAVE;
8179         return IPR_RC_JOB_RETURN;
8180 }
8181
8182 /**
8183  * ipr_reset_restore_cfg_space - Restore PCI config space.
8184  * @ipr_cmd:    ipr command struct
8185  *
8186  * Description: This function restores the saved PCI config space of
8187  * the adapter, fails all outstanding ops back to the callers, and
8188  * fetches the dump/unit check if applicable to this reset.
8189  *
8190  * Return value:
8191  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8192  **/
8193 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8194 {
8195         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8196         u32 int_reg;
8197
8198         ENTER;
8199         ioa_cfg->pdev->state_saved = true;
8200         pci_restore_state(ioa_cfg->pdev);
8201
8202         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8203                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8204                 return IPR_RC_JOB_CONTINUE;
8205         }
8206
8207         ipr_fail_all_ops(ioa_cfg);
8208
8209         if (ioa_cfg->sis64) {
8210                 /* Set the adapter to the correct endian mode. */
8211                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8212                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8213         }
8214
8215         if (ioa_cfg->ioa_unit_checked) {
8216                 if (ioa_cfg->sis64) {
8217                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8218                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8219                         return IPR_RC_JOB_RETURN;
8220                 } else {
8221                         ioa_cfg->ioa_unit_checked = 0;
8222                         ipr_get_unit_check_buffer(ioa_cfg);
8223                         ipr_cmd->job_step = ipr_reset_alert;
8224                         ipr_reset_start_timer(ipr_cmd, 0);
8225                         return IPR_RC_JOB_RETURN;
8226                 }
8227         }
8228
8229         if (ioa_cfg->in_ioa_bringdown) {
8230                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8231         } else {
8232                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8233
8234                 if (GET_DUMP == ioa_cfg->sdt_state) {
8235                         ioa_cfg->sdt_state = READ_DUMP;
8236                         ioa_cfg->dump_timeout = 0;
8237                         if (ioa_cfg->sis64)
8238                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8239                         else
8240                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8241                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8242                         schedule_work(&ioa_cfg->work_q);
8243                         return IPR_RC_JOB_RETURN;
8244                 }
8245         }
8246
8247         LEAVE;
8248         return IPR_RC_JOB_CONTINUE;
8249 }
8250
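/*
 * Editor's note: pdev->state_saved is forced to true above because
 * pci_restore_state() silently does nothing when the PCI core believes
 * no state was saved, and the driver needs the config space saved
 * earlier to be written back on every reset pass.
 */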
8251 /**
8252  * ipr_reset_bist_done - BIST has completed on the adapter.
8253  * @ipr_cmd:    ipr command struct
8254  *
8255  * Description: Unblock config space and resume the reset process.
8256  *
8257  * Return value:
8258  *      IPR_RC_JOB_CONTINUE
8259  **/
8260 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8261 {
8262         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8263
8264         ENTER;
8265         if (ioa_cfg->cfg_locked)
8266                 pci_cfg_access_unlock(ioa_cfg->pdev);
8267         ioa_cfg->cfg_locked = 0;
8268         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8269         LEAVE;
8270         return IPR_RC_JOB_CONTINUE;
8271 }
8272
8273 /**
8274  * ipr_reset_start_bist - Run BIST on the adapter.
8275  * @ipr_cmd:    ipr command struct
8276  *
8277  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8278  *
8279  * Return value:
8280  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8281  **/
8282 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8283 {
8284         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8285         int rc = PCIBIOS_SUCCESSFUL;
8286
8287         ENTER;
8288         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8289                 writel(IPR_UPROCI_SIS64_START_BIST,
8290                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8291         else
8292                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8293
8294         if (rc == PCIBIOS_SUCCESSFUL) {
8295                 ipr_cmd->job_step = ipr_reset_bist_done;
8296                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8297                 rc = IPR_RC_JOB_RETURN;
8298         } else {
8299                 if (ioa_cfg->cfg_locked)
8300                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8301                 ioa_cfg->cfg_locked = 0;
8302                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8303                 rc = IPR_RC_JOB_CONTINUE;
8304         }
8305
8306         LEAVE;
8307         return rc;
8308 }
8309
8310 /**
8311  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8312  * @ipr_cmd:    ipr command struct
8313  *
8314  * Description: This clears PCI reset to the adapter and delays two seconds.
8315  *
8316  * Return value:
8317  *      IPR_RC_JOB_RETURN
8318  **/
8319 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8320 {
8321         ENTER;
8322         ipr_cmd->job_step = ipr_reset_bist_done;
8323         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8324         LEAVE;
8325         return IPR_RC_JOB_RETURN;
8326 }
8327
8328 /**
8329  * ipr_reset_reset_work - Pulse a PCIe warm reset
8330  * @work:       work struct
8331  *
8332  * Description: This pulses a PCIe warm reset to the adapter's slot.
8333  *
8334  **/
8335 static void ipr_reset_reset_work(struct work_struct *work)
8336 {
8337         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8338         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8339         struct pci_dev *pdev = ioa_cfg->pdev;
8340         unsigned long lock_flags = 0;
8341
8342         ENTER;
8343         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8344         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8345         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8346
8347         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8348         if (ioa_cfg->reset_cmd == ipr_cmd)
8349                 ipr_reset_ioa_job(ipr_cmd);
8350         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8351         LEAVE;
8352 }
8353
8354 /**
8355  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8356  * @ipr_cmd:    ipr command struct
8357  *
8358  * Description: This asserts PCI reset to the adapter.
8359  *
8360  * Return value:
8361  *      IPR_RC_JOB_RETURN
8362  **/
8363 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8364 {
8365         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8366
8367         ENTER;
8368         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8369         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8370         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8371         LEAVE;
8372         return IPR_RC_JOB_RETURN;
8373 }
8374
8375 /**
8376  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8377  * @ipr_cmd:    ipr command struct
8378  *
8379  * Description: This attempts to block config access to the IOA.
8380  *
8381  * Return value:
8382  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8383  **/
8384 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8385 {
8386         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8387         int rc = IPR_RC_JOB_CONTINUE;
8388
8389         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8390                 ioa_cfg->cfg_locked = 1;
8391                 ipr_cmd->job_step = ioa_cfg->reset;
8392         } else {
8393                 if (ipr_cmd->u.time_left) {
8394                         rc = IPR_RC_JOB_RETURN;
8395                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8396                         ipr_reset_start_timer(ipr_cmd,
8397                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8398                 } else {
8399                         ipr_cmd->job_step = ioa_cfg->reset;
8400                         dev_err(&ioa_cfg->pdev->dev,
8401                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8402                 }
8403         }
8404
8405         return rc;
8406 }
8407
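/*
 * Editor's note: the retry path above is a poll-with-deadline pattern.
 * u.time_left starts at IPR_WAIT_FOR_RESET_TIMEOUT and loses
 * IPR_CHECK_FOR_RESET_TIMEOUT on each failed pci_cfg_access_trylock()
 * attempt, re-arming the reset timer in between; once it hits zero the
 * reset proceeds without the lock rather than stalling forever.
 */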
8408 /**
8409  * ipr_reset_block_config_access - Block config access to the IOA
8410  * @ipr_cmd:    ipr command struct
8411  *
8412  * Description: This attempts to block config access to the IOA
8413  *
8414  * Return value:
8415  *      IPR_RC_JOB_CONTINUE
8416  **/
8417 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8418 {
8419         ipr_cmd->ioa_cfg->cfg_locked = 0;
8420         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8421         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8422         return IPR_RC_JOB_CONTINUE;
8423 }
8424
8425 /**
8426  * ipr_reset_allowed - Query whether or not IOA can be reset
8427  * @ioa_cfg:    ioa config struct
8428  *
8429  * Return value:
8430  *      0 if reset not allowed / non-zero if reset is allowed
8431  **/
8432 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8433 {
8434         volatile u32 temp_reg;
8435
8436         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8437         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8438 }
8439
8440 /**
8441  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8442  * @ipr_cmd:    ipr command struct
8443  *
8444  * Description: This function waits for adapter permission to run BIST,
8445  * then runs BIST. If the adapter does not give permission after a
8446  * reasonable time, we will reset the adapter anyway. The risk of
8447  * resetting the adapter without warning it is losing the
8448  * adapter's persistent error log. If the adapter is reset while
8449  * it is writing to its flash, the flash segment will have bad
8450  * ECC and be zeroed.
8451  *
8452  * Return value:
8453  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8454  **/
8455 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8456 {
8457         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8458         int rc = IPR_RC_JOB_RETURN;
8459
8460         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8461                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8462                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8463         } else {
8464                 ipr_cmd->job_step = ipr_reset_block_config_access;
8465                 rc = IPR_RC_JOB_CONTINUE;
8466         }
8467
8468         return rc;
8469 }
8470
8471 /**
8472  * ipr_reset_alert - Alert the adapter of a pending reset
8473  * @ipr_cmd:    ipr command struct
8474  *
8475  * Description: This function alerts the adapter that it will be reset.
8476  * If memory space is not currently enabled, proceed directly
8477  * to running BIST on the adapter. The timer must always be started
8478  * so we guarantee we do not run BIST from ipr_isr.
8479  *
8480  * Return value:
8481  *      IPR_RC_JOB_RETURN
8482  **/
8483 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8484 {
8485         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8486         u16 cmd_reg;
8487         int rc;
8488
8489         ENTER;
8490         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8491
8492         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8493                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8494                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8495                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8496         } else {
8497                 ipr_cmd->job_step = ipr_reset_block_config_access;
8498         }
8499
8500         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8501         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8502
8503         LEAVE;
8504         return IPR_RC_JOB_RETURN;
8505 }
8506
8507 /**
8508  * ipr_reset_quiesce_done - Complete IOA disconnect
8509  * @ipr_cmd:    ipr command struct
8510  *
8511  * Description: Freeze the adapter to complete quiesce processing
8512  *
8513  * Return value:
8514  *      IPR_RC_JOB_CONTINUE
8515  **/
8516 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8517 {
8518         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8519
8520         ENTER;
8521         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8522         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8523         LEAVE;
8524         return IPR_RC_JOB_CONTINUE;
8525 }
8526
8527 /**
8528  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8529  * @ipr_cmd:    ipr command struct
8530  *
8531  * Description: Ensure nothing is outstanding to the IOA and
8532  *              proceed with IOA disconnect. Otherwise reset the IOA.
8533  *
8534  * Return value:
8535  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8536  **/
8537 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8538 {
8539         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8540         struct ipr_cmnd *loop_cmd;
8541         struct ipr_hrr_queue *hrrq;
8542         int rc = IPR_RC_JOB_CONTINUE;
8543         int count = 0;
8544
8545         ENTER;
8546         ipr_cmd->job_step = ipr_reset_quiesce_done;
8547
8548         for_each_hrrq(hrrq, ioa_cfg) {
8549                 spin_lock(&hrrq->_lock);
8550                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8551                         count++;
8552                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8553                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8554                         rc = IPR_RC_JOB_RETURN;
8555                         break;
8556                 }
8557                 spin_unlock(&hrrq->_lock);
8558
8559                 if (count)
8560                         break;
8561         }
8562
8563         LEAVE;
8564         return rc;
8565 }
8566
8567 /**
8568  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8569  * @ipr_cmd:    ipr command struct
8570  *
8571  * Description: Cancel any outstanding HCAMs to the IOA.
8572  *
8573  * Return value:
8574  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8575  **/
8576 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8577 {
8578         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8579         int rc = IPR_RC_JOB_CONTINUE;
8580         struct ipr_cmd_pkt *cmd_pkt;
8581         struct ipr_cmnd *hcam_cmd;
8582         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8583
8584         ENTER;
8585         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8586
8587         if (!hrrq->ioa_is_dead) {
8588                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8589                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8590                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8591                                         continue;
8592
8593                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8595                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8596                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8597                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8598                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
8599                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8600                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8601                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8602                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8603                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8604                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8605                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8606                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8607
8608                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8609                                            IPR_CANCEL_TIMEOUT);
8610
8611                                 rc = IPR_RC_JOB_RETURN;
8612                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8613                                 break;
8614                         }
8615                 }
8616         } else
8617                 ipr_cmd->job_step = ipr_reset_alert;
8618
8619         LEAVE;
8620         return rc;
8621 }
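
/*
 * Illustrative sketch (not driver code): ipr_reset_cancel_hcam() above
 * scatters a 64-bit DMA address across the CDB big-endian -- bits 63..32
 * land in cdb[10..13] and bits 31..0 in cdb[2..5].  A minimal standalone
 * model of that packing, with a round-trip check:
 */
#include <stdint.h>
#include <stdio.h>

static void pack_addr64(uint8_t cdb[16], uint64_t addr)
{
        cdb[10] = (addr >> 56) & 0xff;          /* upper 32 bits, MSB first */
        cdb[11] = (addr >> 48) & 0xff;
        cdb[12] = (addr >> 40) & 0xff;
        cdb[13] = (addr >> 32) & 0xff;
        cdb[2]  = (addr >> 24) & 0xff;          /* lower 32 bits, MSB first */
        cdb[3]  = (addr >> 16) & 0xff;
        cdb[4]  = (addr >> 8) & 0xff;
        cdb[5]  = addr & 0xff;
}

static uint64_t unpack_addr64(const uint8_t cdb[16])
{
        uint64_t hi = ((uint64_t)cdb[10] << 24) | ((uint64_t)cdb[11] << 16) |
                      ((uint64_t)cdb[12] << 8) | cdb[13];
        uint64_t lo = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
                      ((uint64_t)cdb[4] << 8) | cdb[5];
        return (hi << 32) | lo;
}

int main(void)
{
        uint8_t cdb[16] = { 0 };
        uint64_t addr = 0x123456789abcdef0ULL;

        pack_addr64(cdb, addr);
        printf("round trip %s\n",
               unpack_addr64(cdb) == addr ? "ok" : "BROKEN");
        return 0;
}
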
8622
8623 /**
8624  * ipr_reset_ucode_download_done - Microcode download completion
8625  * @ipr_cmd:    ipr command struct
8626  *
8627  * Description: This function unmaps the microcode download buffer.
8628  *
8629  * Return value:
8630  *      IPR_RC_JOB_CONTINUE
8631  **/
8632 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8633 {
8634         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8635         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8636
8637         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8638                      sglist->num_sg, DMA_TO_DEVICE);
8639
8640         ipr_cmd->job_step = ipr_reset_alert;
8641         return IPR_RC_JOB_CONTINUE;
8642 }
8643
8644 /**
8645  * ipr_reset_ucode_download - Download microcode to the adapter
8646  * @ipr_cmd:    ipr command struct
8647  *
8648  * Description: This function checks to see if there is microcode
8649  * to download to the adapter. If there is, a download is performed.
8650  *
8651  * Return value:
8652  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8653  **/
8654 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8655 {
8656         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8657         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8658
8659         ENTER;
8660         ipr_cmd->job_step = ipr_reset_alert;
8661
8662         if (!sglist)
8663                 return IPR_RC_JOB_CONTINUE;
8664
8665         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8666         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8667         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8668         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8669         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8670         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8671         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8672
8673         if (ioa_cfg->sis64)
8674                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8675         else
8676                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8677         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8678
8679         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8680                    IPR_WRITE_BUFFER_TIMEOUT);
8681
8682         LEAVE;
8683         return IPR_RC_JOB_RETURN;
8684 }
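
/*
 * Illustrative sketch (not driver code): the WRITE BUFFER setup above
 * encodes the microcode image length as a 24-bit big-endian value in
 * cdb[6..8].  A minimal standalone model of that encoding, with
 * hypothetical names:
 */
#include <stdint.h>
#include <stdio.h>

static void set_write_buffer_len(uint8_t cdb[10], uint32_t len)
{
        cdb[6] = (len & 0xff0000) >> 16;        /* MSB of 24-bit length */
        cdb[7] = (len & 0x00ff00) >> 8;
        cdb[8] = len & 0x0000ff;                /* LSB */
}

int main(void)
{
        uint8_t cdb[10] = { 0 };

        set_write_buffer_len(cdb, 0x012345);
        printf("cdb[6..8] = %02x %02x %02x\n", cdb[6], cdb[7], cdb[8]);
        return 0;
}
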
8685
8686 /**
8687  * ipr_reset_shutdown_ioa - Shutdown the adapter
8688  * @ipr_cmd:    ipr command struct
8689  *
8690  * Description: This function issues an adapter shutdown of the
8691  * specified type to the specified adapter as part of the
8692  * adapter reset job.
8693  *
8694  * Return value:
8695  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8696  **/
8697 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8698 {
8699         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8700         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8701         unsigned long timeout;
8702         int rc = IPR_RC_JOB_CONTINUE;
8703
8704         ENTER;
8705         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8706                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8707         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8708                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8709                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8710                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8711                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8712                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8713
8714                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8715                         timeout = IPR_SHUTDOWN_TIMEOUT;
8716                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8717                         timeout = IPR_INTERNAL_TIMEOUT;
8718                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8719                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8720                 else
8721                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8722
8723                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8724
8725                 rc = IPR_RC_JOB_RETURN;
8726                 ipr_cmd->job_step = ipr_reset_ucode_download;
8727         } else
8728                 ipr_cmd->job_step = ipr_reset_alert;
8729
8730         LEAVE;
8731         return rc;
8732 }
8733
8734 /**
8735  * ipr_reset_ioa_job - Adapter reset job
8736  * @ipr_cmd:    ipr command struct
8737  *
8738  * Description: This function is the job router for the adapter reset job.
8739  *
8740  * Return value:
8741  *      none
8742  **/
8743 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8744 {
8745         u32 rc, ioasc;
8746         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8747
8748         do {
8749                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8750
8751                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8752                         /*
8753                          * We are doing nested adapter resets and this is
8754                          * not the current reset job.
8755                          */
8756                         list_add_tail(&ipr_cmd->queue,
8757                                         &ipr_cmd->hrrq->hrrq_free_q);
8758                         return;
8759                 }
8760
8761                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8762                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8763                         if (rc == IPR_RC_JOB_RETURN)
8764                                 return;
8765                 }
8766
8767                 ipr_reinit_ipr_cmnd(ipr_cmd);
8768                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8769                 rc = ipr_cmd->job_step(ipr_cmd);
8770         } while (rc == IPR_RC_JOB_CONTINUE);
8771 }
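
/*
 * Illustrative sketch (not driver code): ipr_reset_ioa_job() is a
 * trampoline -- each job step either does its work synchronously and
 * returns IPR_RC_JOB_CONTINUE (run the next step immediately) or kicks
 * off an asynchronous operation and returns IPR_RC_JOB_RETURN (the
 * completion path reenters the router later).  A minimal userspace model
 * of that trampoline, with hypothetical names:
 */
#include <stdio.h>

enum rc { JOB_CONTINUE, JOB_RETURN };

struct job {
        enum rc (*step)(struct job *);
};

static enum rc step_done(struct job *j)
{
        (void)j;
        printf("done\n");
        return JOB_RETURN;              /* nothing more to run synchronously */
}

static enum rc step_setup(struct job *j)
{
        printf("setup\n");
        j->step = step_done;            /* chain to the next step */
        return JOB_CONTINUE;            /* run it right away */
}

static void run_job(struct job *j)
{
        while (j->step(j) == JOB_CONTINUE)
                ;                       /* next step was installed by the previous one */
}

int main(void)
{
        struct job j = { .step = step_setup };

        run_job(&j);
        return 0;
}
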
8772
8773 /**
8774  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8775  * @ioa_cfg:            ioa config struct
8776  * @job_step:           first job step of reset job
8777  * @shutdown_type:      shutdown type
8778  *
8779  * Description: This function will initiate the reset of the given adapter
8780  * starting at the selected job step.
8781  * If the caller needs to wait on the completion of the reset,
8782  * the caller must sleep on the reset_wait_q.
8783  *
8784  * Return value:
8785  *      none
8786  **/
8787 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8788                                     int (*job_step) (struct ipr_cmnd *),
8789                                     enum ipr_shutdown_type shutdown_type)
8790 {
8791         struct ipr_cmnd *ipr_cmd;
8792         int i;
8793
8794         ioa_cfg->in_reset_reload = 1;
8795         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8796                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8797                 ioa_cfg->hrrq[i].allow_cmds = 0;
8798                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8799         }
8800         wmb();
8801         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8802                 scsi_block_requests(ioa_cfg->host);
8803
8804         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8805         ioa_cfg->reset_cmd = ipr_cmd;
8806         ipr_cmd->job_step = job_step;
8807         ipr_cmd->u.shutdown_type = shutdown_type;
8808
8809         ipr_reset_ioa_job(ipr_cmd);
8810 }
8811
8812 /**
8813  * ipr_initiate_ioa_reset - Initiate an adapter reset
8814  * @ioa_cfg:            ioa config struct
8815  * @shutdown_type:      shutdown type
8816  *
8817  * Description: This function will initiate the reset of the given adapter.
8818  * If the caller needs to wait on the completion of the reset,
8819  * the caller must sleep on the reset_wait_q.
8820  *
8821  * Return value:
8822  *      none
8823  **/
8824 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8825                                    enum ipr_shutdown_type shutdown_type)
8826 {
8827         int i;
8828
8829         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8830                 return;
8831
8832         if (ioa_cfg->in_reset_reload) {
8833                 if (ioa_cfg->sdt_state == GET_DUMP)
8834                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8835                 else if (ioa_cfg->sdt_state == READ_DUMP)
8836                         ioa_cfg->sdt_state = ABORT_DUMP;
8837         }
8838
8839         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8840                 dev_err(&ioa_cfg->pdev->dev,
8841                         "IOA taken offline - error recovery failed\n");
8842
8843                 ioa_cfg->reset_retries = 0;
8844                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8845                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8846                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8847                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8848                 }
8849                 wmb();
8850
8851                 if (ioa_cfg->in_ioa_bringdown) {
8852                         ioa_cfg->reset_cmd = NULL;
8853                         ioa_cfg->in_reset_reload = 0;
8854                         ipr_fail_all_ops(ioa_cfg);
8855                         wake_up_all(&ioa_cfg->reset_wait_q);
8856
8857                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8858                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8859                                 scsi_unblock_requests(ioa_cfg->host);
8860                                 spin_lock_irq(ioa_cfg->host->host_lock);
8861                         }
8862                         return;
8863                 } else {
8864                         ioa_cfg->in_ioa_bringdown = 1;
8865                         shutdown_type = IPR_SHUTDOWN_NONE;
8866                 }
8867         }
8868
8869         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8870                                 shutdown_type);
8871 }
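
/*
 * Illustrative sketch (not driver code): ipr_initiate_ioa_reset() gives
 * error recovery a bounded number of attempts before declaring the
 * adapter dead.  A minimal standalone model of that retry accounting,
 * with hypothetical names and constants:
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 10          /* stands in for IPR_NUM_RESET_RELOAD_RETRIES */

static int retries;
static bool dead;

static void initiate_reset(void)
{
        if (dead)
                return;                 /* adapter already taken offline */
        if (retries++ >= MAX_RETRIES) {
                dead = true;            /* error recovery failed */
                return;
        }
        printf("reset attempt %d\n", retries);
}

int main(void)
{
        int i;

        for (i = 0; i < MAX_RETRIES + 2; i++)
                initiate_reset();
        printf(dead ? "adapter offline\n" : "still trying\n");
        return 0;
}
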
8872
8873 /**
8874  * ipr_reset_freeze - Hold off all I/O activity
8875  * @ipr_cmd:    ipr command struct
8876  *
8877  * Description: If the PCI slot is frozen, hold off all I/O
8878  * activity; then, as soon as the slot is available again,
8879  * initiate an adapter reset.
8880  */
8881 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8882 {
8883         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8884         int i;
8885
8886         /* Disallow new interrupts, avoid loop */
8887         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8888                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8889                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8890                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8891         }
8892         wmb();
8893         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8894         ipr_cmd->done = ipr_reset_ioa_job;
8895         return IPR_RC_JOB_RETURN;
8896 }
8897
8898 /**
8899  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8900  * @pdev:       PCI device struct
8901  *
8902  * Description: This routine is called to tell us that the MMIO
8903  * access to the IOA has been restored
8904  */
8905 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8906 {
8907         unsigned long flags = 0;
8908         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8909
8910         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8911         if (!ioa_cfg->probe_done)
8912                 pci_save_state(pdev);
8913         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8914         return PCI_ERS_RESULT_NEED_RESET;
8915 }
8916
8917 /**
8918  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8919  * @pdev:       PCI device struct
8920  *
8921  * Description: This routine is called to tell us that the PCI bus
8922  * is down. Can't do anything here, except put the device driver
8923  * into a holding pattern, waiting for the PCI bus to come back.
8924  */
8925 static void ipr_pci_frozen(struct pci_dev *pdev)
8926 {
8927         unsigned long flags = 0;
8928         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8929
8930         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8931         if (ioa_cfg->probe_done)
8932                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8933         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8934 }
8935
8936 /**
8937  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8938  * @pdev:       PCI device struct
8939  *
8940  * Description: This routine is called by the pci error recovery
8941  * code after the PCI slot has been reset, just before we
8942  * should resume normal operations.
8943  */
8944 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8945 {
8946         unsigned long flags = 0;
8947         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8948
8949         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8950         if (ioa_cfg->probe_done) {
8951                 if (ioa_cfg->needs_warm_reset)
8952                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8953                 else
8954                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8955                                                 IPR_SHUTDOWN_NONE);
8956         } else
8957                 wake_up_all(&ioa_cfg->eeh_wait_q);
8958         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8959         return PCI_ERS_RESULT_RECOVERED;
8960 }
8961
8962 /**
8963  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8964  * @pdev:       PCI device struct
8965  *
8966  * Description: This routine is called when the PCI bus has
8967  * permanently failed.
8968  */
8969 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8970 {
8971         unsigned long flags = 0;
8972         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8973         int i;
8974
8975         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8976         if (ioa_cfg->probe_done) {
8977                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8978                         ioa_cfg->sdt_state = ABORT_DUMP;
8979                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8980                 ioa_cfg->in_ioa_bringdown = 1;
8981                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8982                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8983                         ioa_cfg->hrrq[i].allow_cmds = 0;
8984                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8985                 }
8986                 wmb();
8987                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8988         } else
8989                 wake_up_all(&ioa_cfg->eeh_wait_q);
8990         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8991 }
8992
8993 /**
8994  * ipr_pci_error_detected - Called when a PCI error is detected.
8995  * @pdev:       PCI device struct
8996  * @state:      PCI channel state
8997  *
8998  * Description: Called when a PCI error is detected.
8999  *
9000  * Return value:
9001  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9002  */
9003 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9004                                                pci_channel_state_t state)
9005 {
9006         switch (state) {
9007         case pci_channel_io_frozen:
9008                 ipr_pci_frozen(pdev);
9009                 return PCI_ERS_RESULT_CAN_RECOVER;
9010         case pci_channel_io_perm_failure:
9011                 ipr_pci_perm_failure(pdev);
9012                 return PCI_ERS_RESULT_DISCONNECT;
9014         default:
9015                 break;
9016         }
9017         return PCI_ERS_RESULT_NEED_RESET;
9018 }
9019
9020 /**
9021  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9022  * @ioa_cfg:    ioa cfg struct
9023  *
9024  * Description: This is the second phase of adapter initialization.
9025  * This function takes care of initializing the adapter to the point
9026  * where it can accept new commands.
9027  *
9028  * Return value:
9029  *      0 on success / -EIO on failure
9030  **/
9031 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9032 {
9033         int rc = 0;
9034         unsigned long host_lock_flags = 0;
9035
9036         ENTER;
9037         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9038         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
9039         ioa_cfg->probe_done = 1;
9040         if (ioa_cfg->needs_hard_reset) {
9041                 ioa_cfg->needs_hard_reset = 0;
9042                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9043         } else
9044                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9045                                         IPR_SHUTDOWN_NONE);
9046         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9047
9048         LEAVE;
9049         return rc;
9050 }
9051
9052 /**
9053  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9054  * @ioa_cfg:    ioa config struct
9055  *
9056  * Return value:
9057  *      none
9058  **/
9059 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9060 {
9061         int i;
9062
9063         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9064                 if (ioa_cfg->ipr_cmnd_list[i])
9065                         dma_pool_free(ioa_cfg->ipr_cmd_pool,
9066                                       ioa_cfg->ipr_cmnd_list[i],
9067                                       ioa_cfg->ipr_cmnd_list_dma[i]);
9068
9069                 ioa_cfg->ipr_cmnd_list[i] = NULL;
9070         }
9071
9072         if (ioa_cfg->ipr_cmd_pool)
9073                 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9074
9075         kfree(ioa_cfg->ipr_cmnd_list);
9076         kfree(ioa_cfg->ipr_cmnd_list_dma);
9077         ioa_cfg->ipr_cmnd_list = NULL;
9078         ioa_cfg->ipr_cmnd_list_dma = NULL;
9079         ioa_cfg->ipr_cmd_pool = NULL;
9080 }
9081
9082 /**
9083  * ipr_free_mem - Frees memory allocated for an adapter
9084  * @ioa_cfg:    ioa cfg struct
9085  *
9086  * Return value:
9087  *      nothing
9088  **/
9089 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9090 {
9091         int i;
9092
9093         kfree(ioa_cfg->res_entries);
9094         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9095                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9096         ipr_free_cmd_blks(ioa_cfg);
9097
9098         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9099                 dma_free_coherent(&ioa_cfg->pdev->dev,
9100                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9101                                   ioa_cfg->hrrq[i].host_rrq,
9102                                   ioa_cfg->hrrq[i].host_rrq_dma);
9103
9104         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9105                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9106
9107         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9108                 dma_free_coherent(&ioa_cfg->pdev->dev,
9109                                   sizeof(struct ipr_hostrcb),
9110                                   ioa_cfg->hostrcb[i],
9111                                   ioa_cfg->hostrcb_dma[i]);
9112         }
9113
9114         ipr_free_dump(ioa_cfg);
9115         kfree(ioa_cfg->trace);
9116 }
9117
9118 /**
9119  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9120  * @ioa_cfg:    ipr cfg struct
9121  *
9122  * This function frees all allocated IRQs for the
9123  * specified adapter.
9124  *
9125  * Return value:
9126  *      none
9127  **/
9128 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9129 {
9130         struct pci_dev *pdev = ioa_cfg->pdev;
9131
9132         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9133             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9134                 int i;
9135                 for (i = 0; i < ioa_cfg->nvectors; i++)
9136                         free_irq(ioa_cfg->vectors_info[i].vec,
9137                                  &ioa_cfg->hrrq[i]);
9138         } else
9139                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9140
9141         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9142                 pci_disable_msi(pdev);
9143                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9144         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9145                 pci_disable_msix(pdev);
9146                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9147         }
9148 }
9149
9150 /**
9151  * ipr_free_all_resources - Free all allocated resources for an adapter.
9152  * @ioa_cfg:    ioa config struct
9153  *
9154  * This function frees all allocated resources for the
9155  * specified adapter.
9156  *
9157  * Return value:
9158  *      none
9159  **/
9160 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9161 {
9162         struct pci_dev *pdev = ioa_cfg->pdev;
9163
9164         ENTER;
9165         ipr_free_irqs(ioa_cfg);
9166         if (ioa_cfg->reset_work_q)
9167                 destroy_workqueue(ioa_cfg->reset_work_q);
9168         iounmap(ioa_cfg->hdw_dma_regs);
9169         pci_release_regions(pdev);
9170         ipr_free_mem(ioa_cfg);
9171         scsi_host_put(ioa_cfg->host);
9172         pci_disable_device(pdev);
9173         LEAVE;
9174 }
9175
9176 /**
9177  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9178  * @ioa_cfg:    ioa config struct
9179  *
9180  * Return value:
9181  *      0 on success / -ENOMEM on allocation failure
9182  **/
9183 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9184 {
9185         struct ipr_cmnd *ipr_cmd;
9186         struct ipr_ioarcb *ioarcb;
9187         dma_addr_t dma_addr;
9188         int i, entries_each_hrrq, hrrq_id = 0;
9189
9190         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9191                                                 sizeof(struct ipr_cmnd), 512, 0);
9192
9193         if (!ioa_cfg->ipr_cmd_pool)
9194                 return -ENOMEM;
9195
9196         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9197         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9198
9199         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9200                 ipr_free_cmd_blks(ioa_cfg);
9201                 return -ENOMEM;
9202         }
9203
9204         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9205                 if (ioa_cfg->hrrq_num > 1) {
9206                         if (i == 0) {
9207                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9208                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9209                                 ioa_cfg->hrrq[i].max_cmd_id =
9210                                         (entries_each_hrrq - 1);
9211                         } else {
9212                                 entries_each_hrrq =
9213                                         IPR_NUM_BASE_CMD_BLKS/
9214                                         (ioa_cfg->hrrq_num - 1);
9215                                 ioa_cfg->hrrq[i].min_cmd_id =
9216                                         IPR_NUM_INTERNAL_CMD_BLKS +
9217                                         (i - 1) * entries_each_hrrq;
9218                                 ioa_cfg->hrrq[i].max_cmd_id =
9219                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9220                                         i * entries_each_hrrq - 1);
9221                         }
9222                 } else {
9223                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9224                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9225                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9226                 }
9227                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9228         }
9229
9230         BUG_ON(ioa_cfg->hrrq_num == 0);
9231
9232         i = IPR_NUM_CMD_BLKS -
9233                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9234         if (i > 0) {
9235                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9236                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9237         }
9238
9239         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9240                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9241
9242                 if (!ipr_cmd) {
9243                         ipr_free_cmd_blks(ioa_cfg);
9244                         return -ENOMEM;
9245                 }
9246
9247                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9248                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9249                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9250
9251                 ioarcb = &ipr_cmd->ioarcb;
9252                 ipr_cmd->dma_addr = dma_addr;
9253                 if (ioa_cfg->sis64)
9254                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9255                 else
9256                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9257
9258                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9259                 if (ioa_cfg->sis64) {
9260                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9261                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9262                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9263                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9264                 } else {
9265                         ioarcb->write_ioadl_addr =
9266                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9267                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9268                         ioarcb->ioasa_host_pci_addr =
9269                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9270                 }
9271                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9272                 ipr_cmd->cmd_index = i;
9273                 ipr_cmd->ioa_cfg = ioa_cfg;
9274                 ipr_cmd->sense_buffer_dma = dma_addr +
9275                         offsetof(struct ipr_cmnd, sense_buffer);
9276
9277                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9278                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9279                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9280                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9281                         hrrq_id++;
9282         }
9283
9284         return 0;
9285 }
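
/*
 * Illustrative sketch (not driver code): the allocator above reserves
 * the first HRRQ for internal commands, splits the remaining command
 * blocks evenly across the other queues, and folds any division
 * remainder into the last queue.  The arithmetic, modelled standalone
 * with hypothetical constants:
 */
#include <stdio.h>

#define NUM_INTERNAL  4         /* stands in for IPR_NUM_INTERNAL_CMD_BLKS */
#define NUM_BASE    100         /* stands in for IPR_NUM_BASE_CMD_BLKS */
#define NUM_TOTAL   (NUM_INTERNAL + NUM_BASE)

int main(void)
{
        int hrrq_num = 4, i, min, max, per_q, leftover;

        for (i = 0; i < hrrq_num; i++) {
                if (i == 0) {
                        min = 0;
                        max = NUM_INTERNAL - 1;
                } else {
                        per_q = NUM_BASE / (hrrq_num - 1);
                        min = NUM_INTERNAL + (i - 1) * per_q;
                        max = NUM_INTERNAL + i * per_q - 1;
                }
                if (i == hrrq_num - 1) {
                        /* last queue absorbs the remainder */
                        leftover = NUM_TOTAL - 1 - max;
                        max += leftover;
                }
                printf("hrrq %d: cmd ids %d..%d\n", i, min, max);
        }
        return 0;
}
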
9286
9287 /**
9288  * ipr_alloc_mem - Allocate memory for an adapter
9289  * @ioa_cfg:    ioa config struct
9290  *
9291  * Return value:
9292  *      0 on success / non-zero for error
9293  **/
9294 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9295 {
9296         struct pci_dev *pdev = ioa_cfg->pdev;
9297         int i, rc = -ENOMEM;
9298
9299         ENTER;
9300         ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9301                                        ioa_cfg->max_devs_supported, GFP_KERNEL);
9302
9303         if (!ioa_cfg->res_entries)
9304                 goto out;
9305
9306         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9307                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9308                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9309         }
9310
9311         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9312                                               sizeof(struct ipr_misc_cbs),
9313                                               &ioa_cfg->vpd_cbs_dma,
9314                                               GFP_KERNEL);
9315
9316         if (!ioa_cfg->vpd_cbs)
9317                 goto out_free_res_entries;
9318
9319         if (ipr_alloc_cmd_blks(ioa_cfg))
9320                 goto out_free_vpd_cbs;
9321
9322         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9323                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9324                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9325                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9326                                         GFP_KERNEL);
9327
9328                 if (!ioa_cfg->hrrq[i].host_rrq) {
9329                         while (--i >= 0)        /* unwind, including hrrq[0] */
9330                                 dma_free_coherent(&pdev->dev,
9331                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9332                                         ioa_cfg->hrrq[i].host_rrq,
9333                                         ioa_cfg->hrrq[i].host_rrq_dma);
9334                         goto out_ipr_free_cmd_blocks;
9335                 }
9336                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9337         }
9338
9339         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9340                                                   ioa_cfg->cfg_table_size,
9341                                                   &ioa_cfg->cfg_table_dma,
9342                                                   GFP_KERNEL);
9343
9344         if (!ioa_cfg->u.cfg_table)
9345                 goto out_free_host_rrq;
9346
9347         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9348                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9349                                                          sizeof(struct ipr_hostrcb),
9350                                                          &ioa_cfg->hostrcb_dma[i],
9351                                                          GFP_KERNEL);
9352
9353                 if (!ioa_cfg->hostrcb[i])
9354                         goto out_free_hostrcb_dma;
9355
9356                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9357                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9358                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9359                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9360         }
9361
9362         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9363                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9364
9365         if (!ioa_cfg->trace)
9366                 goto out_free_hostrcb_dma;
9367
9368         rc = 0;
9369 out:
9370         LEAVE;
9371         return rc;
9372
9373 out_free_hostrcb_dma:
9374         while (i-- > 0) {
9375                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9376                                   ioa_cfg->hostrcb[i],
9377                                   ioa_cfg->hostrcb_dma[i]);
9378         }
9379         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9380                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9381 out_free_host_rrq:
9382         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9383                 dma_free_coherent(&pdev->dev,
9384                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9385                                   ioa_cfg->hrrq[i].host_rrq,
9386                                   ioa_cfg->hrrq[i].host_rrq_dma);
9387         }
9388 out_ipr_free_cmd_blocks:
9389         ipr_free_cmd_blks(ioa_cfg);
9390 out_free_vpd_cbs:
9391         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9392                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9393 out_free_res_entries:
9394         kfree(ioa_cfg->res_entries);
9395         goto out;
9396 }
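
/*
 * Illustrative sketch (not driver code): ipr_alloc_mem() above uses the
 * classic kernel goto-unwind idiom -- each allocation failure jumps to a
 * label that frees exactly what was already allocated, in reverse order.
 * A minimal standalone skeleton of the idiom, with hypothetical names:
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(char **pa, char **pb, char **pc)
{
        *pa = malloc(16);
        if (!*pa)
                goto out;
        *pb = malloc(16);
        if (!*pb)
                goto out_free_a;
        *pc = malloc(16);
        if (!*pc)
                goto out_free_b;
        return 0;               /* success: caller owns a, b, c */

out_free_b:                     /* labels unwind in reverse order */
        free(*pb);
out_free_a:
        free(*pa);
out:
        return -1;
}

int main(void)
{
        char *a, *b, *c;

        if (setup(&a, &b, &c) == 0) {
                printf("setup ok\n");
                free(c);
                free(b);
                free(a);
        }
        return 0;
}
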
9397
9398 /**
9399  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9400  * @ioa_cfg:    ioa config struct
9401  *
9402  * Return value:
9403  *      none
9404  **/
9405 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9406 {
9407         int i;
9408
9409         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9410                 ioa_cfg->bus_attr[i].bus = i;
9411                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9412                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9413                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9414                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9415                 else
9416                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9417         }
9418 }
9419
9420 /**
9421  * ipr_init_regs - Initialize IOA registers
9422  * @ioa_cfg:    ioa config struct
9423  *
9424  * Return value:
9425  *      none
9426  **/
9427 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9428 {
9429         const struct ipr_interrupt_offsets *p;
9430         struct ipr_interrupts *t;
9431         void __iomem *base;
9432
9433         p = &ioa_cfg->chip_cfg->regs;
9434         t = &ioa_cfg->regs;
9435         base = ioa_cfg->hdw_dma_regs;
9436
9437         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9438         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9439         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9440         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9441         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9442         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9443         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9444         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9445         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9446         t->ioarrin_reg = base + p->ioarrin_reg;
9447         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9448         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9449         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9450         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9451         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9452         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9453
9454         if (ioa_cfg->sis64) {
9455                 t->init_feedback_reg = base + p->init_feedback_reg;
9456                 t->dump_addr_reg = base + p->dump_addr_reg;
9457                 t->dump_data_reg = base + p->dump_data_reg;
9458                 t->endian_swap_reg = base + p->endian_swap_reg;
9459         }
9460 }
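
/*
 * Illustrative sketch (not driver code): ipr_init_regs() above resolves
 * every register as "mapped BAR base + per-chip offset", letting one
 * driver binary handle register layouts that differ between chips.  A
 * tiny standalone model, with hypothetical offsets:
 */
#include <stdint.h>
#include <stdio.h>

struct chip_offsets { uint32_t status, doorbell; };
struct mapped_regs { uint8_t *status, *doorbell; };

static void init_regs(struct mapped_regs *t, uint8_t *base,
                      const struct chip_offsets *p)
{
        t->status = base + p->status;
        t->doorbell = base + p->doorbell;
}

int main(void)
{
        static uint8_t fake_bar[0x100];         /* stands in for the ioremapped BAR */
        static const struct chip_offsets chip_a = { 0x20, 0x40 };
        struct mapped_regs regs;

        init_regs(&regs, fake_bar, &chip_a);
        printf("status at +0x%tx, doorbell at +0x%tx\n",
               regs.status - fake_bar, regs.doorbell - fake_bar);
        return 0;
}
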
9461
9462 /**
9463  * ipr_init_ioa_cfg - Initialize IOA config struct
9464  * @ioa_cfg:    ioa config struct
9465  * @host:               scsi host struct
9466  * @pdev:               PCI dev struct
9467  *
9468  * Return value:
9469  *      none
9470  **/
9471 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9472                              struct Scsi_Host *host, struct pci_dev *pdev)
9473 {
9474         int i;
9475
9476         ioa_cfg->host = host;
9477         ioa_cfg->pdev = pdev;
9478         ioa_cfg->log_level = ipr_log_level;
9479         ioa_cfg->doorbell = IPR_DOORBELL;
9480         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9481         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9482         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9483         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9484         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9485         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9486
9487         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9488         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9489         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9490         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9491         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9492         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9493         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9494         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9495         ioa_cfg->sdt_state = INACTIVE;
9496
9497         ipr_initialize_bus_attr(ioa_cfg);
9498         ioa_cfg->max_devs_supported = ipr_max_devs;
9499
9500         if (ioa_cfg->sis64) {
9501                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9502                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9503                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9504                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9505                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9506                                            + ((sizeof(struct ipr_config_table_entry64)
9507                                                * ioa_cfg->max_devs_supported)));
9508         } else {
9509                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9510                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9511                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9512                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9513                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9514                                            + ((sizeof(struct ipr_config_table_entry)
9515                                                * ioa_cfg->max_devs_supported)));
9516         }
9517
9518         host->max_channel = IPR_VSET_BUS;
9519         host->unique_id = host->host_no;
9520         host->max_cmd_len = IPR_MAX_CDB_LEN;
9521         host->can_queue = ioa_cfg->max_cmds;
9522         pci_set_drvdata(pdev, ioa_cfg);
9523
9524         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9525                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9526                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9527                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9528                 if (i == 0)
9529                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9530                 else
9531                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9532         }
9533 }
9534
9535 /**
9536  * ipr_get_chip_info - Find adapter chip information
9537  * @dev_id:             PCI device id struct
9538  *
9539  * Return value:
9540  *      ptr to chip information on success / NULL on failure
9541  **/
9542 static const struct ipr_chip_t *
9543 ipr_get_chip_info(const struct pci_device_id *dev_id)
9544 {
9545         int i;
9546
9547         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9548                 if (ipr_chip[i].vendor == dev_id->vendor &&
9549                     ipr_chip[i].device == dev_id->device)
9550                         return &ipr_chip[i];
9551         return NULL;
9552 }
9553
9554 /**
9555  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9556  *                                              during probe time
9557  * @ioa_cfg:    ioa config struct
9558  *
9559  * Return value:
9560  *      None
9561  **/
9562 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9563 {
9564         struct pci_dev *pdev = ioa_cfg->pdev;
9565
9566         if (pci_channel_offline(pdev)) {
9567                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9568                                    !pci_channel_offline(pdev),
9569                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9570                 pci_restore_state(pdev);
9571         }
9572 }
9573
9574 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9575 {
9576         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9577         int i, vectors;
9578
9579         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9580                 entries[i].entry = i;
9581
9582         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9583                                         entries, 1, ipr_number_of_msix);
9584         if (vectors < 0) {
9585                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9586                 return vectors;
9587         }
9588
9589         for (i = 0; i < vectors; i++)
9590                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9591         ioa_cfg->nvectors = vectors;
9592
9593         return 0;
9594 }
9595
9596 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9597 {
9598         int i, vectors;
9599
9600         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9601         if (vectors < 0) {
9602                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9603                 return vectors;
9604         }
9605
9606         for (i = 0; i < vectors; i++)
9607                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9608         ioa_cfg->nvectors = vectors;
9609
9610         return 0;
9611 }
9612
9613 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9614 {
9615         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9616
9617         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9618                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9619                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9620                 ioa_cfg->vectors_info[vec_idx].
9621                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9622         }
9623 }
9624
9625 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9626 {
9627         int i, rc;
9628
9629         for (i = 1; i < ioa_cfg->nvectors; i++) {
9630                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9631                         ipr_isr_mhrrq,
9632                         0,
9633                         ioa_cfg->vectors_info[i].desc,
9634                         &ioa_cfg->hrrq[i]);
9635                 if (rc) {
9636                         while (--i >= 0)
9637                                 free_irq(ioa_cfg->vectors_info[i].vec,
9638                                         &ioa_cfg->hrrq[i]);
9639                         return rc;
9640                 }
9641         }
9642         return 0;
9643 }
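
/*
 * Illustrative sketch (not driver code): on a request_irq() failure
 * above, the "while (--i >= 0)" loop walks back down and releases, in
 * reverse order, every vector acquired before the failure.  A minimal
 * standalone model of that acquire-or-unwind idiom, with hypothetical
 * names:
 */
#include <stdio.h>

static int grab(int i) { return i < 3; }        /* pretend slot 3 fails */
static void release(int i) { printf("released %d\n", i); }

static int grab_all(int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!grab(i)) {
                        while (--i >= 0)        /* reverse unwind */
                                release(i);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        printf(grab_all(5) == 0 ? "all acquired\n" : "failed, unwound\n");
        return 0;
}
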
9644
9645 /**
9646  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9647  * @irq:                interrupt number
9648  * @devp:               pointer to the ioa config struct
 *
9649  * Description: Simply set the msi_received flag to 1 indicating that
9650  * Message Signaled Interrupts are supported.
9651  *
9652  * Return value:
9653  *      IRQ_HANDLED
9654  **/
9655 static irqreturn_t ipr_test_intr(int irq, void *devp)
9656 {
9657         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9658         unsigned long lock_flags = 0;
9659         irqreturn_t rc = IRQ_HANDLED;
9660
9661         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9662         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9663
9664         ioa_cfg->msi_received = 1;
9665         wake_up(&ioa_cfg->msi_wait_q);
9666
9667         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9668         return rc;
9669 }
9670
9671 /**
9672  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9673  * @ioa_cfg:            ioa config struct
 * @pdev:               PCI device struct
9674  *
9675  * Description: The return value from pci_enable_msi_range() cannot always be
9676  * trusted.  This routine sets up and initiates a test interrupt to determine
9677  * if the interrupt is received via the ipr_test_intr() service routine.
9678  * If the test fails, the driver will fall back to LSI.
9679  *
9680  * Return value:
9681  *      0 on success / non-zero on failure
9682  **/
9683 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9684 {
9685         int rc;
9686         volatile u32 int_reg;
9687         unsigned long lock_flags = 0;
9688
9689         ENTER;
9690
9691         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9692         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9693         ioa_cfg->msi_received = 0;
9694         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9695         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9696         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9697         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9698
9699         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9700                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9701         else
9702                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9703         if (rc) {
9704                 dev_err(&pdev->dev, "Cannot assign irq %d\n", pdev->irq);
9705                 return rc;
9706         } else if (ipr_debug)
9707                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9708
9709         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9710         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9711         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9712         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9713         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9714
9715         if (!ioa_cfg->msi_received) {
9716                 /* MSI test failed */
9717                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9718                 rc = -EOPNOTSUPP;
9719         } else if (ipr_debug)
9720                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9721
9722         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9723
9724         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9725                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9726         else
9727                 free_irq(pdev->irq, ioa_cfg);
9728
9729         LEAVE;
9730
9731         return rc;
9732 }
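
/*
 * Illustrative sketch (not driver code): ipr_test_msi() above is a
 * self-test -- trigger one interrupt, then sleep on a wait queue until
 * the handler sets a flag or a timeout expires.  A minimal pthreads
 * model of the same handshake, with hypothetical names
 * (build: cc demo.c -lpthread):
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int received;

/* Stands in for ipr_test_intr(): record the event, wake the waiter. */
static void *fake_irq(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        received = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;                   /* roughly HZ worth of patience */

        pthread_create(&t, NULL, fake_irq, NULL);

        pthread_mutex_lock(&lock);
        while (!received &&
               pthread_cond_timedwait(&cond, &lock, &deadline) == 0)
                ;                               /* loop over spurious wakeups */
        printf(received ? "test interrupt seen\n"
                        : "timed out, fall back to LSI\n");
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
}
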
9733
9734 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9735  * @pdev:               PCI device struct
9736  * @dev_id:             PCI device id struct
9737  *
9738  * Return value:
9739  *      0 on success / non-zero on failure
9740  **/
9741 static int ipr_probe_ioa(struct pci_dev *pdev,
9742                          const struct pci_device_id *dev_id)
9743 {
9744         struct ipr_ioa_cfg *ioa_cfg;
9745         struct Scsi_Host *host;
9746         unsigned long ipr_regs_pci;
9747         void __iomem *ipr_regs;
9748         int rc = PCIBIOS_SUCCESSFUL;
9749         volatile u32 mask, uproc, interrupts;
9750         unsigned long lock_flags, driver_lock_flags;
9751
9752         ENTER;
9753
9754         dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9755         host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9756
9757         if (!host) {
9758                 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9759                 rc = -ENOMEM;
9760                 goto out;
9761         }
9762
9763         ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9764         memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9765         ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9766
9767         ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9768
9769         if (!ioa_cfg->ipr_chip) {
9770                 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9771                         dev_id->vendor, dev_id->device);
9772                 rc = -ENODEV;   /* any negative errno; without this, probe returned 0 here */
                goto out_scsi_host_put;
9773         }
9774
9775         /* set SIS 32 or SIS 64 */
9776         ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9777         ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9778         ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9779         ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9780
9781         if (ipr_transop_timeout)
9782                 ioa_cfg->transop_timeout = ipr_transop_timeout;
9783         else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9784                 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9785         else
9786                 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9787
9788         ioa_cfg->revid = pdev->revision;
9789
9790         ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9791
9792         ipr_regs_pci = pci_resource_start(pdev, 0);
9793
9794         rc = pci_request_regions(pdev, IPR_NAME);
9795         if (rc < 0) {
9796                 dev_err(&pdev->dev,
9797                         "Couldn't register memory range of registers\n");
9798                 goto out_scsi_host_put;
9799         }
9800
9801         rc = pci_enable_device(pdev);
9802
9803         if (rc || pci_channel_offline(pdev)) {
9804                 if (pci_channel_offline(pdev)) {
9805                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9806                         rc = pci_enable_device(pdev);
9807                 }
9808
9809                 if (rc) {
9810                         dev_err(&pdev->dev, "Cannot enable adapter\n");
9811                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9812                         goto out_release_regions;
9813                 }
9814         }
9815
9816         ipr_regs = pci_ioremap_bar(pdev, 0);
9817
9818         if (!ipr_regs) {
9819                 dev_err(&pdev->dev,
9820                         "Couldn't map memory range of registers\n");
9821                 rc = -ENOMEM;
9822                 goto out_disable;
9823         }
9824
9825         ioa_cfg->hdw_dma_regs = ipr_regs;
9826         ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9827         ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9828
9829         ipr_init_regs(ioa_cfg);
9830
9831         if (ioa_cfg->sis64) {
9832                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9833                 if (rc < 0) {
9834                         dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9835                         rc = dma_set_mask_and_coherent(&pdev->dev,
9836                                                        DMA_BIT_MASK(32));
9837                 }
9838         } else
9839                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9840
9841         if (rc < 0) {
9842                 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9843                 goto cleanup_nomem;
9844         }
9845
9846         rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9847                                    ioa_cfg->chip_cfg->cache_line_size);
9848
9849         if (rc != PCIBIOS_SUCCESSFUL) {
9850                 dev_err(&pdev->dev, "Write of cache line size failed\n");
9851                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9852                 rc = -EIO;
9853                 goto cleanup_nomem;
9854         }
9855
9856         /* Issue MMIO read to ensure card is not in EEH */
9857         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9858         ipr_wait_for_pci_err_recovery(ioa_cfg);
9859
9860         if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9861                 dev_err(&pdev->dev, "Capping requested MSI-X vectors at the maximum of %d\n",
9862                         IPR_MAX_MSIX_VECTORS);
9863                 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9864         }
9865
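             /*
              * Interrupt setup: on chips that support message-signalled
              * interrupts, try MSI-X first, then plain MSI, and fall back to
              * a single legacy line-based interrupt (LSI) if neither can be
              * enabled.
              */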
9866         if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9867                         ipr_enable_msix(ioa_cfg) == 0)
9868                 ioa_cfg->intr_flag = IPR_USE_MSIX;
9869         else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9870                         ipr_enable_msi(ioa_cfg) == 0)
9871                 ioa_cfg->intr_flag = IPR_USE_MSI;
9872         else {
9873                 ioa_cfg->intr_flag = IPR_USE_LSI;
9874                 ioa_cfg->nvectors = 1;
9875                 dev_info(&pdev->dev, "Cannot enable MSI or MSI-X, using LSI.\n");
9876         }
9877
9878         pci_set_master(pdev);
9879
9880         if (pci_channel_offline(pdev)) {
9881                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9882                 pci_set_master(pdev);
9883                 if (pci_channel_offline(pdev)) {
9884                         rc = -EIO;
9885                         goto out_msi_disable;
9886                 }
9887         }
9888
9889         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9890             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9891                 rc = ipr_test_msi(ioa_cfg, pdev);
9892                 if (rc == -EOPNOTSUPP) {
9893                         ipr_wait_for_pci_err_recovery(ioa_cfg);
9894                         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9895                                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9896                                 pci_disable_msi(pdev);
9897                         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9898                                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9899                                 pci_disable_msix(pdev);
9900                         }
9901
9902                         ioa_cfg->intr_flag = IPR_USE_LSI;
9903                         ioa_cfg->nvectors = 1;
9904                 } else if (rc) {
9905                         goto out_msi_disable;
9906                 } else {
9908                         if (ioa_cfg->intr_flag == IPR_USE_MSI)
9909                                 dev_info(&pdev->dev,
9910                                         "Request for %d MSIs succeeded with starting IRQ: %d\n",
9911                                         ioa_cfg->nvectors, pdev->irq);
9912                         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9913                                 dev_info(&pdev->dev,
9914                                         "Request for %d MSI-X vectors succeeded.\n",
9915                                         ioa_cfg->nvectors);
9916                 }
9917         }
9918
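             /*
              * Use no more HRRQs than the smallest of: the interrupt vectors
              * actually granted, the number of online CPUs, and the
              * per-adapter maximum.
              */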
9919         ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9920                                 (unsigned int)num_online_cpus(),
9921                                 (unsigned int)IPR_MAX_HRRQ_NUM);
9922
9923         if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9924                 goto out_msi_disable;
9925
9926         if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9927                 goto out_msi_disable;
9928
9929         rc = ipr_alloc_mem(ioa_cfg);
9930         if (rc < 0) {
9931                 dev_err(&pdev->dev,
9932                         "Couldn't allocate enough memory for device driver!\n");
9933                 goto out_msi_disable;
9934         }
9935
9936         /* Save away PCI config space for use following IOA reset */
9937         rc = pci_save_state(pdev);
9938
9939         if (rc != PCIBIOS_SUCCESSFUL) {
9940                 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9941                 rc = -EIO;
9942                 goto cleanup_nolog;
9943         }
9944
9945         /*
9946          * If HRRQ updated interrupt is not masked, or reset alert is set,
9947          * the card is in an unknown state and needs a hard reset
9948          */
9949         mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9950         interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9951         uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9952         if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9953                 ioa_cfg->needs_hard_reset = 1;
9954         if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9955                 ioa_cfg->needs_hard_reset = 1;
9956         if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9957                 ioa_cfg->ioa_unit_checked = 1;
9958
9959         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9960         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9961         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9962
9963         if (ioa_cfg->intr_flag == IPR_USE_MSI
9964                         || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9965                 name_msi_vectors(ioa_cfg);
9966                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9967                         0,
9968                         ioa_cfg->vectors_info[0].desc,
9969                         &ioa_cfg->hrrq[0]);
9970                 if (!rc)
9971                         rc = ipr_request_other_msi_irqs(ioa_cfg);
9972         } else {
9973                 rc = request_irq(pdev->irq, ipr_isr,
9974                          IRQF_SHARED,
9975                          IPR_NAME, &ioa_cfg->hrrq[0]);
9976         }
9977         if (rc) {
9978                 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9979                         pdev->irq, rc);
9980                 goto cleanup_nolog;
9981         }
9982
9983         if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9984             (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9985                 ioa_cfg->needs_warm_reset = 1;
9986                 ioa_cfg->reset = ipr_reset_slot_reset;
9987
9988                 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9989                                                                 WQ_MEM_RECLAIM, host->host_no);
9990
9991                 if (!ioa_cfg->reset_work_q) {
9992                         dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
                             rc = -ENOMEM;
9993                         goto out_free_irq;
9994                 }
9995         } else
9996                 ioa_cfg->reset = ipr_reset_start_bist;
9997
9998         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9999         list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10000         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10001
10002         LEAVE;
10003 out:
10004         return rc;
10005
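/*
 * Error unwind: the labels below are ordered so that each falls through to
 * the next, releasing resources in the reverse order of their acquisition.
 */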
10006 out_free_irq:
10007         ipr_free_irqs(ioa_cfg);
10008 cleanup_nolog:
10009         ipr_free_mem(ioa_cfg);
10010 out_msi_disable:
10011         ipr_wait_for_pci_err_recovery(ioa_cfg);
10012         if (ioa_cfg->intr_flag == IPR_USE_MSI)
10013                 pci_disable_msi(pdev);
10014         else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10015                 pci_disable_msix(pdev);
10016 cleanup_nomem:
10017         iounmap(ipr_regs);
10018 out_disable:
10019         pci_disable_device(pdev);
10020 out_release_regions:
10021         pci_release_regions(pdev);
10022 out_scsi_host_put:
10023         scsi_host_put(host);
10024         goto out;
10025 }
10026
10027 /**
10028  * ipr_initiate_ioa_bringdown - Bring down an adapter
10029  * @ioa_cfg:            ioa config struct
10030  * @shutdown_type:      shutdown type
10031  *
10032  * Description: This function will initiate bringing down the adapter.
10033  * This consists of issuing an IOA shutdown to the adapter
10034  * to flush the cache, and running BIST.
10035  * If the caller needs to wait on the completion of the reset,
10036  * the caller must sleep on the reset_wait_q.
10037  *
10038  * Return value:
10039  *      none
10040  **/
10041 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10042                                        enum ipr_shutdown_type shutdown_type)
10043 {
10044         ENTER;
10045         if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10046                 ioa_cfg->sdt_state = ABORT_DUMP;
10047         ioa_cfg->reset_retries = 0;
10048         ioa_cfg->in_ioa_bringdown = 1;
10049         ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10050         LEAVE;
10051 }
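/*
 * Usage sketch (this mirrors what __ipr_remove() below actually does): a
 * caller that needs the bringdown to complete takes the host lock around
 * the initiation and then sleeps on reset_wait_q:
 *
 *      spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *      ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *      spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *      wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */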
10052
10053 /**
10054  * __ipr_remove - Remove a single adapter
10055  * @pdev:       pci device struct
10056  *
10057  * Adapter hot plug remove entry point.
10058  *
10059  * Return value:
10060  *      none
10061  **/
10062 static void __ipr_remove(struct pci_dev *pdev)
10063 {
10064         unsigned long host_lock_flags = 0;
10065         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10066         int i;
10067         unsigned long driver_lock_flags;
10068         ENTER;
10069
10070         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10071         while (ioa_cfg->in_reset_reload) {
10072                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10073                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10074                 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10075         }
10076
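             /*
              * Mark every HRRQ as going away before initiating the bringdown
              * so the I/O paths stop accepting new commands; the wmb() makes
              * the flags visible before the reset starts.
              */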
10077         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10078                 spin_lock(&ioa_cfg->hrrq[i]._lock);
10079                 ioa_cfg->hrrq[i].removing_ioa = 1;
10080                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10081         }
10082         wmb();
10083         ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10084
10085         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10086         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10087         flush_work(&ioa_cfg->work_q);
10088         if (ioa_cfg->reset_work_q)
10089                 flush_workqueue(ioa_cfg->reset_work_q);
10090         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10091         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10092
10093         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10094         list_del(&ioa_cfg->queue);
10095         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10096
10097         if (ioa_cfg->sdt_state == ABORT_DUMP)
10098                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10099         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10100
10101         ipr_free_all_resources(ioa_cfg);
10102
10103         LEAVE;
10104 }
10105
10106 /**
10107  * ipr_remove - IOA hot plug remove entry point
10108  * @pdev:       pci device struct
10109  *
10110  * Adapter hot plug remove entry point.
10111  *
10112  * Return value:
10113  *      none
10114  **/
10115 static void ipr_remove(struct pci_dev *pdev)
10116 {
10117         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10118
10119         ENTER;
10120
10121         ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10122                               &ipr_trace_attr);
10123         ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10124                              &ipr_dump_attr);
10125         scsi_remove_host(ioa_cfg->host);
10126
10127         __ipr_remove(pdev);
10128
10129         LEAVE;
10130 }
10131
10132 /**
10133  * ipr_probe - Adapter hot plug add entry point
       * @pdev:       pci device struct
       * @dev_id:     pci device id struct
10134  *
10135  * Return value:
10136  *      0 on success / non-zero on failure
10137  **/
10138 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10139 {
10140         struct ipr_ioa_cfg *ioa_cfg;
10141         int rc, i;
10142
10143         rc = ipr_probe_ioa(pdev, dev_id);
10144
10145         if (rc)
10146                 return rc;
10147
10148         ioa_cfg = pci_get_drvdata(pdev);
10149         rc = ipr_probe_ioa_part2(ioa_cfg);
10150
10151         if (rc) {
10152                 __ipr_remove(pdev);
10153                 return rc;
10154         }
10155
10156         rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10157
10158         if (rc) {
10159                 __ipr_remove(pdev);
10160                 return rc;
10161         }
10162
10163         rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10164                                    &ipr_trace_attr);
10165
10166         if (rc) {
10167                 scsi_remove_host(ioa_cfg->host);
10168                 __ipr_remove(pdev);
10169                 return rc;
10170         }
10171
10172         rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10173                                    &ipr_dump_attr);
10174
10175         if (rc) {
10176                 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10177                                       &ipr_trace_attr);
10178                 scsi_remove_host(ioa_cfg->host);
10179                 __ipr_remove(pdev);
10180                 return rc;
10181         }
10182
10183         scsi_scan_host(ioa_cfg->host);
10184         ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10185
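             /*
              * On SIS-64 adapters with more than one interrupt vector,
              * enable blk_iopoll on every HRRQ except HRRQ 0 so completions
              * can be handled by softirq polling instead of entirely in the
              * interrupt handler.
              */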
10186         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10187                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10188                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10189                                         ioa_cfg->iopoll_weight, ipr_iopoll);
10190                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10191                 }
10192         }
10193
10194         schedule_work(&ioa_cfg->work_q);
10195         return 0;
10196 }
10197
10198 /**
10199  * ipr_shutdown - Shutdown handler.
10200  * @pdev:       pci device struct
10201  *
10202  * This function is invoked upon system shutdown/reboot. It issues an
10203  * adapter shutdown to flush the write cache.
10204  *
10205  * Return value:
10206  *      none
10207  **/
10208 static void ipr_shutdown(struct pci_dev *pdev)
10209 {
10210         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10211         unsigned long lock_flags = 0;
10212         enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10213         int i;
10214
10215         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10216         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10217                 ioa_cfg->iopoll_weight = 0;
10218                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10219                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10220         }
10221
10222         while (ioa_cfg->in_reset_reload) {
10223                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10224                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10225                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10226         }
10227
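             /*
              * With the ipr_fast_reboot module parameter set, SIS-64
              * adapters are merely quiesced on restart instead of given a
              * full shutdown, presumably to shorten reboot time.
              */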
10228         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10229                 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10230
10231         ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10232         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10233         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10234         if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10235                 ipr_free_irqs(ioa_cfg);
10236                 pci_disable_device(ioa_cfg->pdev);
10237         }
10238 }
10239
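/*
 * Entries follow the struct pci_device_id field order: vendor, device,
 * subsystem vendor, subsystem device, class, class_mask (unused here, hence
 * the trailing 0, 0), and driver_data flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT that ipr_probe_ioa() consults above.
 */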
10240 static const struct pci_device_id ipr_pci_table[] = {
10241         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10242                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10243         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10244                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10245         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10246                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10247         { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10248                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10249         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10250                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10251         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10252                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10253         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10254                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10255         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10256                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10257                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10258         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10259               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10260         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10261               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10262               IPR_USE_LONG_TRANSOP_TIMEOUT },
10263         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10264               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10265               IPR_USE_LONG_TRANSOP_TIMEOUT },
10266         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10267               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10268         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10269               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10270               IPR_USE_LONG_TRANSOP_TIMEOUT},
10271         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10272               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10273               IPR_USE_LONG_TRANSOP_TIMEOUT },
10274         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10275               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10276               IPR_USE_LONG_TRANSOP_TIMEOUT },
10277         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10278               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10279         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10280               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10281         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10282               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10283               IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10284         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10285                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10286         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10287                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10288         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10289                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10290                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10291         { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10292                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10293                 IPR_USE_LONG_TRANSOP_TIMEOUT },
10294         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10295                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10296         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10297                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10298         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10299                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10300         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10301                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10302         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10303                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10304         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10305                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10306         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10307                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10308         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10309                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10310         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10311                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10312         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10313                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10314         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10315                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10316         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10317                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10318         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10319                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10320         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10321                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10322         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10323                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10324         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10325                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10326         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10327                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10328         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10329                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10330         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10331                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10332         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10333                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10334         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10335                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10336         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10337                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10338         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10339                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10340         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10341                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10342         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10343                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10344         { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10345                 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10346         { }
10347 };
10348 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10349
10350 static const struct pci_error_handlers ipr_err_handler = {
10351         .error_detected = ipr_pci_error_detected,
10352         .mmio_enabled = ipr_pci_mmio_enabled,
10353         .slot_reset = ipr_pci_slot_reset,
10354 };
10355
10356 static struct pci_driver ipr_driver = {
10357         .name = IPR_NAME,
10358         .id_table = ipr_pci_table,
10359         .probe = ipr_probe,
10360         .remove = ipr_remove,
10361         .shutdown = ipr_shutdown,
10362         .err_handler = &ipr_err_handler,
10363 };
10364
10365 /**
10366  * ipr_halt_done - Shutdown prepare completion
       * @ipr_cmd:    ipr command struct
10367  *
10368  * Return value:
10369  *      none
10370  **/
10371 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10372 {
10373         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10374 }
10375
10376 /**
10377  * ipr_halt - Issue shutdown prepare to all adapters
       * @nb:         notifier block
       * @event:      reboot notifier event
       * @buf:        unused
10378  *
10379  * Return value:
10380  *      NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
10381  **/
10382 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10383 {
10384         struct ipr_cmnd *ipr_cmd;
10385         struct ipr_ioa_cfg *ioa_cfg;
10386         unsigned long flags = 0, driver_lock_flags;
10387
10388         if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10389                 return NOTIFY_DONE;
10390
10391         spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10392
10393         list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10394                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10395                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10396                     (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10397                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10398                         continue;
10399                 }
10400
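                     /*
                      * Build a Shutdown Prepare For Normal command against
                      * the IOA resource handle; ipr_halt_done() just returns
                      * the command block to the free queue on completion.
                      */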
10401                 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10402                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10403                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10404                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10405                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10406
10407                 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10408                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10409         }
10410         spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10411
10412         return NOTIFY_OK;
10413 }
10414
10415 static struct notifier_block ipr_notifier = {
10416         .notifier_call = ipr_halt,
10417 };
10418
10419 /**
10420  * ipr_init - Module entry point
10421  *
10422  * Return value:
10423  *      0 on success / negative value on failure
10424  **/
10425 static int __init ipr_init(void)
10426 {
10427         ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10428                  IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10429
10430         register_reboot_notifier(&ipr_notifier);
10431         return pci_register_driver(&ipr_driver);
10432 }
10433
10434 /**
10435  * ipr_exit - Module unload
10436  *
10437  * Module unload entry point.
10438  *
10439  * Return value:
10440  *      none
10441  **/
10442 static void __exit ipr_exit(void)
10443 {
10444         unregister_reboot_notifier(&ipr_notifier);
10445         pci_unregister_driver(&ipr_driver);
10446 }
10447
10448 module_init(ipr_init);
10449 module_exit(ipr_exit);