2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
/* List of all adapter instances managed by this driver. */
static LIST_HEAD(ipr_ioa_head);
/*
 * Module parameters.  The user-visible documentation for each lives in
 * the MODULE_PARM_DESC() declarations further down in this file.
 */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* 0-4, increasing verbosity */
static unsigned int ipr_max_speed = 1;		/* max bus speed selector; 1 = U160 (default) */
static int ipr_testmode = 0;			/* DANGEROUS: allow unsupported configurations */
static unsigned int ipr_fastfail = 0;		/* reduce timeouts and retries when set */
static unsigned int ipr_transop_timeout = 0;	/* seconds to wait for adapter operational; 0 = driver default */
static unsigned int ipr_debug = 0;		/* enable debug logging when set */
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;	/* max physical devices */
static unsigned int ipr_dual_ioa_raid = 1;	/* enable dual-adapter RAID support (default on) */
static unsigned int ipr_number_of_msix = 2;	/* MSI-X interrupts to use (1-16) on capable adapters */
static unsigned int ipr_fast_reboot;		/* skip adapter shutdown during reboot when set */
/* Driver-wide lock (protects driver-global state such as ipr_ioa_head). */
static DEFINE_SPINLOCK(ipr_driver_lock);
105 /* This table describes the differences between DMA controller chips */
106 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
107 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
110 .cache_line_size = 0x20,
114 .set_interrupt_mask_reg = 0x0022C,
115 .clr_interrupt_mask_reg = 0x00230,
116 .clr_interrupt_mask_reg32 = 0x00230,
117 .sense_interrupt_mask_reg = 0x0022C,
118 .sense_interrupt_mask_reg32 = 0x0022C,
119 .clr_interrupt_reg = 0x00228,
120 .clr_interrupt_reg32 = 0x00228,
121 .sense_interrupt_reg = 0x00224,
122 .sense_interrupt_reg32 = 0x00224,
123 .ioarrin_reg = 0x00404,
124 .sense_uproc_interrupt_reg = 0x00214,
125 .sense_uproc_interrupt_reg32 = 0x00214,
126 .set_uproc_interrupt_reg = 0x00214,
127 .set_uproc_interrupt_reg32 = 0x00214,
128 .clr_uproc_interrupt_reg = 0x00218,
129 .clr_uproc_interrupt_reg32 = 0x00218
132 { /* Snipe and Scamp */
135 .cache_line_size = 0x20,
139 .set_interrupt_mask_reg = 0x00288,
140 .clr_interrupt_mask_reg = 0x0028C,
141 .clr_interrupt_mask_reg32 = 0x0028C,
142 .sense_interrupt_mask_reg = 0x00288,
143 .sense_interrupt_mask_reg32 = 0x00288,
144 .clr_interrupt_reg = 0x00284,
145 .clr_interrupt_reg32 = 0x00284,
146 .sense_interrupt_reg = 0x00280,
147 .sense_interrupt_reg32 = 0x00280,
148 .ioarrin_reg = 0x00504,
149 .sense_uproc_interrupt_reg = 0x00290,
150 .sense_uproc_interrupt_reg32 = 0x00290,
151 .set_uproc_interrupt_reg = 0x00290,
152 .set_uproc_interrupt_reg32 = 0x00290,
153 .clr_uproc_interrupt_reg = 0x00294,
154 .clr_uproc_interrupt_reg32 = 0x00294
160 .cache_line_size = 0x20,
164 .set_interrupt_mask_reg = 0x00010,
165 .clr_interrupt_mask_reg = 0x00018,
166 .clr_interrupt_mask_reg32 = 0x0001C,
167 .sense_interrupt_mask_reg = 0x00010,
168 .sense_interrupt_mask_reg32 = 0x00014,
169 .clr_interrupt_reg = 0x00008,
170 .clr_interrupt_reg32 = 0x0000C,
171 .sense_interrupt_reg = 0x00000,
172 .sense_interrupt_reg32 = 0x00004,
173 .ioarrin_reg = 0x00070,
174 .sense_uproc_interrupt_reg = 0x00020,
175 .sense_uproc_interrupt_reg32 = 0x00024,
176 .set_uproc_interrupt_reg = 0x00020,
177 .set_uproc_interrupt_reg32 = 0x00024,
178 .clr_uproc_interrupt_reg = 0x00028,
179 .clr_uproc_interrupt_reg32 = 0x0002C,
180 .init_feedback_reg = 0x0005C,
181 .dump_addr_reg = 0x00064,
182 .dump_data_reg = 0x00068,
183 .endian_swap_reg = 0x00084
188 static const struct ipr_chip_t ipr_chip[] = {
189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
200 static int ipr_max_bus_speeds[] = {
201 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
204 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
205 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
206 module_param_named(max_speed, ipr_max_speed, uint, 0);
207 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
208 module_param_named(log_level, ipr_log_level, uint, 0);
209 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
210 module_param_named(testmode, ipr_testmode, int, 0);
211 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
212 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
213 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
214 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
215 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
216 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
217 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
218 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
219 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
220 module_param_named(max_devs, ipr_max_devs, int, 0);
221 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
222 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
223 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
224 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
225 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
226 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
227 MODULE_LICENSE("GPL");
228 MODULE_VERSION(IPR_DRIVER_VERSION);
230 /* A constant array of IOASCs/URCs/Error Messages */
232 struct ipr_error_table_t ipr_error_table[] = {
233 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
234 "8155: An unknown error was received"},
236 "Soft underlength error"},
238 "Command to be cancelled not found"},
240 "Qualified success"},
241 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
242 "FFFE: Soft device bus error recovered by the IOA"},
243 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
244 "4101: Soft device bus fabric error"},
245 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
246 "FFFC: Logical block guard error recovered by the device"},
247 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
248 "FFFC: Logical block reference tag error recovered by the device"},
249 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
250 "4171: Recovered scatter list tag / sequence number error"},
251 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
252 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
253 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
254 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
255 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
256 "FFFD: Recovered logical block reference tag error detected by the IOA"},
257 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
258 "FFFD: Logical block guard error recovered by the IOA"},
259 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
260 "FFF9: Device sector reassign successful"},
261 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
262 "FFF7: Media error recovered by device rewrite procedures"},
263 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
264 "7001: IOA sector reassignment successful"},
265 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
266 "FFF9: Soft media error. Sector reassignment recommended"},
267 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
268 "FFF7: Media error recovered by IOA rewrite procedures"},
269 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
270 "FF3D: Soft PCI bus error recovered by the IOA"},
271 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
272 "FFF6: Device hardware error recovered by the IOA"},
273 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
274 "FFF6: Device hardware error recovered by the device"},
275 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
276 "FF3D: Soft IOA error recovered by the IOA"},
277 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
278 "FFFA: Undefined device response recovered by the IOA"},
279 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
280 "FFF6: Device bus error, message or command phase"},
281 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
282 "FFFE: Task Management Function failed"},
283 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
284 "FFF6: Failure prediction threshold exceeded"},
285 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
286 "8009: Impending cache battery pack failure"},
288 "Logical Unit in process of becoming ready"},
290 "Initializing command required"},
292 "34FF: Disk device format in progress"},
294 "Logical unit not accessible, target port in unavailable state"},
295 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
296 "9070: IOA requested reset"},
298 "Synchronization required"},
300 "IOA microcode download required"},
302 "Device bus connection is prohibited by host"},
304 "No ready, IOA shutdown"},
306 "Not ready, IOA has been shutdown"},
307 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
308 "3020: Storage subsystem configuration error"},
310 "FFF5: Medium error, data unreadable, recommend reassign"},
312 "7000: Medium error, data unreadable, do not reassign"},
313 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
314 "FFF3: Disk media format bad"},
315 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
316 "3002: Addressed device failed to respond to selection"},
317 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
318 "3100: Device bus error"},
319 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
320 "3109: IOA timed out a device command"},
322 "3120: SCSI bus is not operational"},
323 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
324 "4100: Hard device bus fabric error"},
325 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
326 "310C: Logical block guard error detected by the device"},
327 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
328 "310C: Logical block reference tag error detected by the device"},
329 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
330 "4170: Scatter list tag / sequence number error"},
331 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
332 "8150: Logical block CRC error on IOA to Host transfer"},
333 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
334 "4170: Logical block sequence number error on IOA to Host transfer"},
335 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
336 "310D: Logical block reference tag error detected by the IOA"},
337 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
338 "310D: Logical block guard error detected by the IOA"},
339 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
340 "9000: IOA reserved area data check"},
341 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
342 "9001: IOA reserved area invalid data pattern"},
343 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
344 "9002: IOA reserved area LRC error"},
345 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
346 "Hardware Error, IOA metadata access error"},
347 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
348 "102E: Out of alternate sectors for disk storage"},
349 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
350 "FFF4: Data transfer underlength error"},
351 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
352 "FFF4: Data transfer overlength error"},
353 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
354 "3400: Logical unit failure"},
355 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
356 "FFF4: Device microcode is corrupt"},
357 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
358 "8150: PCI bus error"},
360 "Unsupported device bus message received"},
361 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
362 "FFF4: Disk device problem"},
363 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
364 "8150: Permanent IOA failure"},
365 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
366 "3010: Disk device returned wrong response to IOA"},
367 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
368 "8151: IOA microcode error"},
370 "Device bus status error"},
371 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
372 "8157: IOA error requiring IOA reset to recover"},
374 "ATA device status error"},
376 "Message reject received from the device"},
377 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
378 "8008: A permanent cache battery pack failure occurred"},
379 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
380 "9090: Disk unit has been modified after the last known status"},
381 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
382 "9081: IOA detected device error"},
383 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
384 "9082: IOA detected device error"},
385 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
386 "3110: Device bus error, message or command phase"},
387 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
388 "3110: SAS Command / Task Management Function failed"},
389 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
390 "9091: Incorrect hardware configuration change has been detected"},
391 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
392 "9073: Invalid multi-adapter configuration"},
393 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
394 "4010: Incorrect connection between cascaded expanders"},
395 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
396 "4020: Connections exceed IOA design limits"},
397 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
398 "4030: Incorrect multipath connection"},
399 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
400 "4110: Unsupported enclosure function"},
401 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
402 "4120: SAS cable VPD cannot be read"},
403 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
404 "FFF4: Command to logical unit failed"},
406 "Illegal request, invalid request type or request packet"},
408 "Illegal request, invalid resource handle"},
410 "Illegal request, commands not allowed to this device"},
412 "Illegal request, command not allowed to a secondary adapter"},
414 "Illegal request, command not allowed to a non-optimized resource"},
416 "Illegal request, invalid field in parameter list"},
418 "Illegal request, parameter not supported"},
420 "Illegal request, parameter value invalid"},
422 "Illegal request, command sequence error"},
424 "Illegal request, dual adapter support not enabled"},
426 "Illegal request, another cable connector was physically disabled"},
428 "Illegal request, inconsistent group id/group count"},
429 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
430 "9031: Array protection temporarily suspended, protection resuming"},
431 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
432 "9040: Array protection temporarily suspended, protection resuming"},
433 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
434 "4080: IOA exceeded maximum operating temperature"},
435 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
436 "4085: Service required"},
437 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
438 "3140: Device bus not ready to ready transition"},
439 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
440 "FFFB: SCSI bus was reset"},
442 "FFFE: SCSI bus transition to single ended"},
444 "FFFE: SCSI bus transition to LVD"},
445 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
446 "FFFB: SCSI bus was reset by another initiator"},
447 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
448 "3029: A device replacement has occurred"},
449 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
450 "4102: Device bus fabric performance degradation"},
451 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
452 "9051: IOA cache data exists for a missing or failed device"},
453 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
454 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
455 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
456 "9025: Disk unit is not supported at its physical location"},
457 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
458 "3020: IOA detected a SCSI bus configuration error"},
459 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
460 "3150: SCSI bus configuration error"},
461 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
462 "9074: Asymmetric advanced function disk configuration"},
463 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
464 "4040: Incomplete multipath connection between IOA and enclosure"},
465 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
466 "4041: Incomplete multipath connection between enclosure and device"},
467 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
468 "9075: Incomplete multipath connection between IOA and remote IOA"},
469 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
470 "9076: Configuration error, missing remote IOA"},
471 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
472 "4050: Enclosure does not support a required multipath function"},
473 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
474 "4121: Configuration error, required cable is missing"},
475 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
476 "4122: Cable is not plugged into the correct location on remote IOA"},
477 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
478 "4123: Configuration error, invalid cable vital product data"},
479 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
480 "4124: Configuration error, both cable ends are plugged into the same IOA"},
481 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
482 "4070: Logically bad block written on device"},
483 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
484 "9041: Array protection temporarily suspended"},
485 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
486 "9042: Corrupt array parity detected on specified device"},
487 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
488 "9030: Array no longer protected due to missing or failed disk unit"},
489 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
490 "9071: Link operational transition"},
491 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
492 "9072: Link not operational transition"},
493 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
494 "9032: Array exposed but still protected"},
495 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
496 "70DD: Device forced failed by disrupt device command"},
497 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
498 "4061: Multipath redundancy level got better"},
499 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
500 "4060: Multipath redundancy level got worse"},
502 "Failure due to other device"},
503 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
504 "9008: IOA does not support functions expected by devices"},
505 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
506 "9010: Cache data associated with attached devices cannot be found"},
507 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
508 "9011: Cache data belongs to devices other than those attached"},
509 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
510 "9020: Array missing 2 or more devices with only 1 device present"},
511 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
512 "9021: Array missing 2 or more devices with 2 or more devices present"},
513 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
514 "9022: Exposed array is missing a required device"},
515 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
516 "9023: Array member(s) not at required physical locations"},
517 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
518 "9024: Array not functional due to present hardware configuration"},
519 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
520 "9026: Array not functional due to present hardware configuration"},
521 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
522 "9027: Array is missing a device and parity is out of sync"},
523 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
524 "9028: Maximum number of arrays already exist"},
525 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
526 "9050: Required cache data cannot be located for a disk unit"},
527 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
528 "9052: Cache data exists for a device that has been modified"},
529 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
530 "9054: IOA resources not available due to previous problems"},
531 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
532 "9092: Disk unit requires initialization before use"},
533 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
534 "9029: Incorrect hardware configuration change has been detected"},
535 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
536 "9060: One or more disk pairs are missing from an array"},
537 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
538 "9061: One or more disks are missing from an array"},
539 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
540 "9062: One or more disks are missing from an array"},
541 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
542 "9063: Maximum number of functional arrays has been exceeded"},
544 "Data protect, other volume set problem"},
546 "Aborted command, invalid descriptor"},
548 "Target operating conditions have changed, dual adapter takeover"},
550 "Aborted command, medium removal prevented"},
552 "Command terminated by host"},
554 "Aborted command, command terminated by host"}
557 static const struct ipr_ses_table_entry ipr_ses_table[] = {
558 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
559 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
560 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
561 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
562 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
563 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
564 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
565 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
566 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
568 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
569 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
570 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
574 * Function Prototypes
576 static int ipr_reset_alert(struct ipr_cmnd *);
577 static void ipr_process_ccn(struct ipr_cmnd *);
578 static void ipr_process_error(struct ipr_cmnd *);
579 static void ipr_reset_ioa_job(struct ipr_cmnd *);
580 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
581 enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Fix: restore the elided function braces and the missing "else" of the
 * sis64 branch, which left the body syntactically invalid.
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* atomic_add_return keeps the ring-buffer index race-free */
	trace_entry = &ioa_cfg->trace[atomic_add_return
			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
618 * ipr_lock_and_done - Acquire lock and complete command
619 * @ipr_cmd: ipr command struct
624 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
626 unsigned long lock_flags;
627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
629 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
630 ipr_cmd->done(ipr_cmd);
631 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
635 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
636 * @ipr_cmd: ipr command struct
641 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
643 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
644 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
645 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
646 dma_addr_t dma_addr = ipr_cmd->dma_addr;
649 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
650 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
651 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
652 ioarcb->data_transfer_length = 0;
653 ioarcb->read_data_transfer_length = 0;
654 ioarcb->ioadl_len = 0;
655 ioarcb->read_ioadl_len = 0;
657 if (ipr_cmd->ioa_cfg->sis64) {
658 ioarcb->u.sis64_addr_data.data_ioadl_addr =
659 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
660 ioasa64->u.gata.status = 0;
662 ioarcb->write_ioadl_addr =
663 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
664 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
665 ioasa->u.gata.status = 0;
668 ioasa->hdr.ioasc = 0;
669 ioasa->hdr.residual_data_len = 0;
670 ipr_cmd->scsi_cmd = NULL;
672 ipr_cmd->sense_buffer[0] = 0;
673 ipr_cmd->dma_use_sg = 0;
677 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
678 * @ipr_cmd: ipr command struct
683 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
684 void (*fast_done) (struct ipr_cmnd *))
686 ipr_reinit_ipr_cmnd(ipr_cmd);
687 ipr_cmd->u.scratch = 0;
688 ipr_cmd->sibling = NULL;
689 ipr_cmd->eh_comp = NULL;
690 ipr_cmd->fast_done = fast_done;
691 init_timer(&ipr_cmd->timer);
695 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
696 * @ioa_cfg: ioa config struct
699 * pointer to ipr command struct
702 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
704 struct ipr_cmnd *ipr_cmd = NULL;
706 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
707 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
708 struct ipr_cmnd, queue);
709 list_del(&ipr_cmd->queue);
717 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
718 * @ioa_cfg: ioa config struct
721 * pointer to ipr command struct
724 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
726 struct ipr_cmnd *ipr_cmd =
727 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
728 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
733 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
734 * @ioa_cfg: ioa config struct
735 * @clr_ints: interrupts to clear
737 * This function masks all interrupts on the adapter, then clears the
738 * interrupts specified in the mask
743 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
746 volatile u32 int_reg;
749 /* Stop new interrupts */
750 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
751 spin_lock(&ioa_cfg->hrrq[i]._lock);
752 ioa_cfg->hrrq[i].allow_interrupts = 0;
753 spin_unlock(&ioa_cfg->hrrq[i]._lock);
757 /* Set interrupt mask to stop all new interrupts */
759 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
761 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
763 /* Clear any pending interrupts */
765 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
766 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
767 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
771 * ipr_save_pcix_cmd_reg - Save PCI-X command register
772 * @ioa_cfg: ioa config struct
775 * 0 on success / -EIO on failure
777 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
779 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
781 if (pcix_cmd_reg == 0)
784 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
785 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
786 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
790 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
795 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
796 * @ioa_cfg: ioa config struct
799 * 0 on success / -EIO on failure
801 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
803 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
806 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
807 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
808 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
817 * ipr_sata_eh_done - done function for aborted SATA commands
818 * @ipr_cmd: ipr command struct
820 * This function is invoked for ops generated to SATA
821 * devices which are being aborted.
826 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
828 struct ata_queued_cmd *qc = ipr_cmd->qc;
829 struct ipr_sata_port *sata_port = qc->ap->private_data;
831 qc->err_mask |= AC_ERR_OTHER;
832 sata_port->ioasa.status |= ATA_BUSY;
833 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
838 * ipr_scsi_eh_done - mid-layer done function for aborted ops
839 * @ipr_cmd: ipr command struct
841 * This function is invoked by the interrupt handler for
842 * ops generated by the SCSI mid-layer which are being aborted.
847 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
849 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
851 scsi_cmd->result |= (DID_ERROR << 16);
853 scsi_dma_unmap(ipr_cmd->scsi_cmd);
854 scsi_cmd->scsi_done(scsi_cmd);
855 if (ipr_cmd->eh_comp)
856 complete(ipr_cmd->eh_comp);
857 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
861 * ipr_fail_all_ops - Fails all outstanding ops.
862 * @ioa_cfg: ioa config struct
864 * This function fails all outstanding ops.
869 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
871 struct ipr_cmnd *ipr_cmd, *temp;
872 struct ipr_hrr_queue *hrrq;
875 for_each_hrrq(hrrq, ioa_cfg) {
876 spin_lock(&hrrq->_lock);
877 list_for_each_entry_safe(ipr_cmd,
878 temp, &hrrq->hrrq_pending_q, queue) {
879 list_del(&ipr_cmd->queue);
881 ipr_cmd->s.ioasa.hdr.ioasc =
882 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
883 ipr_cmd->s.ioasa.hdr.ilid =
884 cpu_to_be32(IPR_DRIVER_ILID);
886 if (ipr_cmd->scsi_cmd)
887 ipr_cmd->done = ipr_scsi_eh_done;
888 else if (ipr_cmd->qc)
889 ipr_cmd->done = ipr_sata_eh_done;
891 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
892 IPR_IOASC_IOA_WAS_RESET);
893 del_timer(&ipr_cmd->timer);
894 ipr_cmd->done(ipr_cmd);
896 spin_unlock(&hrrq->_lock);
/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate size bits before writing the IOARCB address to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;
		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		/* sis64 registers take a 64-bit write */
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
		/* non-sis64 path: plain 32-bit write of the IOARCB address */
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value (in jiffies)
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
	/* Queue on the pending list before handing to the adapter */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = done;
	/* Legacy timer API: command pointer is carried in timer.data and
	 * timeout_func is cast to the (unsigned long) callback signature */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
	add_timer(&ipr_cmd->timer);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
	ipr_send_command(ipr_cmd);
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
	/* Clear any sibling linkage before signalling completion */
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	complete(&ipr_cmd->completion);
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * ioadl entry for the transfer (single descriptor scatter list).
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	/* Exactly one descriptor is used */
	ipr_cmd->dma_use_sg = 1;
	if (ipr_cmd->ioa_cfg->sis64) {
		/* 64-bit SIS: separate flags, length and 64-bit address fields */
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);
		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		/* 32-bit SIS: flags and length share one big-endian word */
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);
		/* Reads use the read_* IOARCB fields, writes the plain ones */
		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
	/* Drop the host lock while sleeping so the interrupt handler
	 * can process the completion that wakes us */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
/* Pick an HRR queue index for a new command. With a single queue everything
 * goes to queue 0; otherwise queue 0 is reserved and commands round-robin
 * over queues 1..hrrq_num-1 via an atomic counter. */
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
	if (ioa_cfg->hrrq_num == 1)
	return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM subtype (CDB byte 1)
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;
		/* Build the HCAM CDB: opcode, subtype, and buffer length
		 * split across CDB bytes 7 (MSB) and 8 (LSB) */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
		/* Config-change notifications and error HCAMs have
		 * distinct completion handlers */
		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
			ipr_cmd->done = ipr_process_error;
		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
		ipr_send_command(ipr_cmd);
		/* Commands not allowed: park the hostrcb on the free queue */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
	/* Plain SATA / SAS-STP devices are ATA disks */
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
	/* ATAPI variants (e.g. optical) */
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
	/* Any other protocol: not an ATA device */
		res->ata_class = ATA_DEV_UNKNOWN;
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;
	/* Reset per-resource state flags */
	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sata_port = NULL;
	if (ioa_cfg->sis64) {
		/* 64-bit config table entry: copy resource path and LUN */
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;
		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
		       sizeof(res->res_path));
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);
		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			/* Reuse the target id of an existing resource with the
			 * same device id, if one exists */
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					res->target = gscsi_res->target;
			/* Otherwise allocate a fresh target id from the bitmap */
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		/* 32-bit config table entry: bus/target/lun come straight
		 * from the resource address */
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	/* Derive the ATA device class from the bus protocol */
	ipr_update_ata_class(res, proto);
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
	if (res->ioa_cfg->sis64) {
		/* sis64 identity: device id + LUN must both match */
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun))) {
	/* 32-bit SIS identity: bus/target/lun triple */
	if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
	    res->target == cfgtew->u.cfgte->res_addr.target &&
	    res->lun == cfgtew->u.cfgte->res_addr.lun)
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path (0xff-terminated byte array)
 * @buffer:	buffer in which to place the formatted string
 * @len:	length of buffer provided
 *
 * Formats the path as "XX-XX-..." hex bytes, stopping at the 0xff
 * terminator or when the buffer would be exceeded.
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	/* each element needs 3 chars ("-XX"); (i * 3) < len bounds the output */
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1270 * ipr_format_res_path - Format the resource path for printing.
1271 * @ioa_cfg: ioa config struct
1272 * @res_path: resource path
1274 * @len: length of buffer provided
1279 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1280 u8 *res_path, char *buffer, int len)
1285 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1286 __ipr_format_res_path(res_path, p, len - (buffer - p));
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;
		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
		       sizeof(struct ipr_std_inq_data));
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
		       sizeof(res->dev_lun.scsi_lun));
		/* Only record (and later report) a path change if the
		 * resource path actually differs */
		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			   sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			       sizeof(res->res_path));
		/* Announce the new resource path on the attached sdev */
		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		/* 32-bit SIS update path */
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
		       sizeof(struct ipr_std_inq_data));
		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	ipr_update_ata_class(res, proto);
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 *			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *	none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	/* Target id bitmaps are only maintained on sis64 adapters */
	if (!ioa_cfg->sis64)
	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		/* Another resource sharing this dev_id still owns the target
		 * id; only clear the bit when no such sibling exists */
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
		clear_bit(res->target, ioa_cfg->target_ids);
	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;
	/* Extract the affected resource handle from the sis64 or
	 * 32-bit flavor of the notification */
	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	/* Look for an existing resource with this handle */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
	/* Not found: allocate a free resource entry, re-arm the HCAM
	 * and bail if none are available */
	if (list_empty(&ioa_cfg->free_res_q)) {
		ipr_send_hcam(ioa_cfg,
			      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
	res = list_entry(ioa_cfg->free_res_q.next,
			 struct ipr_resource_entry, queue);
	list_del(&res->queue);
	ipr_init_res_entry(res, &cfgtew);
	list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	ipr_update_res_entry(res, &cfgtew);
	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		/* Device removal: either schedule mid-layer removal (sdev
		 * attached) or return the entry to the free queue */
		res->del_from_ml = 1;
		res->res_handle = IPR_INVALID_RES_HANDLE;
		schedule_work(&ioa_cfg->work_q);
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev || res->del_from_ml) {
		schedule_work(&ioa_cfg->work_q);
	/* Re-arm the config change HCAM for the next notification */
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	/* Reset / host-abort completions are expected during EH and are
	 * not logged as errors */
	if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
	    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	ipr_handle_config_change(ioa_cfg, hostrcb);
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index of the last character of the string in @buf
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	/* Walk backwards over trailing spaces, never past index 0. */
	for (; i != 0 && buf[i] == ' '; --i)
		;
	/* Append exactly one separator space, then terminate. */
	buf[i + 1] = ' ';
	buf[i + 2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
	/* +3: space after vendor, space after product, trailing NUL */
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	/* Concatenate vendor, product and serial number, collapsing
	 * each field's trailing padding to one space */
	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];
	/* VPD fields are fixed-width and not NUL-terminated; terminate
	 * after copying before printing */
	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);
	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	/* Extended VPD additionally carries a 64-bit WWN (two BE words) */
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_12_error *error;
	/* Type 12 error lives in the sis64 or legacy error union */
	error = &hostrcb->hcam.u.error64.u.type_12_error;
	error = &hostrcb->hcam.u.error.u.type_12_error;
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);
	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;
	/* Same layout as the enhanced variant but with non-extended VPD */
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);
	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;
	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);
	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);
	dev_entry = error->dev;
	/* One entry is logged per device error */
	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);
		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);
		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);
	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);
	dev_entry = error->dev;
	for (i = 0; i < errors_logged; i++, dev_entry++) {
		/* sis64 identifies devices by resource path, not res_addr */
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);
		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);
		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;
	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);
	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);
	dev_entry = error->dev;
	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);
		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);
		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
		/* Non-enhanced entries carry five extra raw IOA data words */
		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	/* All-'0' serial number marks an unused array member slot */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
	error = &hostrcb->hcam.u.error.u.type_14_error;
	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);
	array_entry = error->array_member;
	/* Clamp to the array size so a bogus num_entries cannot overrun */
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));
	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);
		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	/* All-'0' serial number marks an unused array member slot */
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
	error = &hostrcb->hcam.u.error.u.type_04_error;
	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);
	array_entry = error->array_member;
	/* Type 04 records carry a fixed maximum of 18 members */
	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);
		ipr_log_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
		/* Members are split across two arrays in the record */
		array_entry = error->array_member2;
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data (big-endian words)
 * @len:	data length in bytes
 *
 * Return value:
 *	none
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
	/* At the default log level, cap the dump size */
	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
	/* Print four 32-bit words per line, prefixed with the byte offset */
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_17_error *error;
	error = &hostrcb->hcam.u.error64.u.type_17_error;
	error = &hostrcb->hcam.u.error.u.type_17_error;
	/* Force NUL termination before trimming whitespace in place */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);
	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	/* Hex dump covers whatever trails the fixed-size header fields */
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
	struct ipr_hostrcb_type_07_error *error;
	error = &hostrcb->hcam.u.error.u.type_07_error;
	/* Force NUL termination before trimming whitespace in place */
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);
	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
/* Map of IPR_PATH_* activity flag values to human-readable path
 * descriptions, used by the fabric path logging routines below. */
static const struct {
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
/* Map of IPR_PATH_STATE_* / health values to human-readable path state
 * descriptions, used by the fabric path logging routines below. */
static const struct {
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	/* Find matching activity and state descriptions in the tables */
	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
			/* 0xff in cascade/phy means "not applicable"; pick the
			 * message format that omits the unused fields */
			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
	/* Fallback when no table entry matched: dump raw values */
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 *
 * Return value:
 *	none
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
			/* sis64 descriptors are identified by resource path */
			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
						buffer, sizeof(buffer)));
	/* Fallback when no table entry matched: dump raw state + path */
	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
/* Map of IPR_PATH_CFG_* type values to human-readable fabric element
 * names, used by the path element logging routines below. */
static const struct {
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
/* Map of IPR_PATH_CFG_* status values to human-readable element status
 * strings, used by the path element logging routines below. */
static const struct {
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
/* Human-readable SAS phy link rate strings, indexed by
 * cfg->link_rate & IPR_PHY_LINK_RATE_MASK. */
static const char *link_rate[] = {
	"phy reset problem",
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 *
 * Return value:
 *	none
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	/* Nothing to report for non-existent elements */
	if (type == IPR_PATH_CFG_NOT_EXIST)
	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				/* 0xff in cascade/phy means "not applicable";
				 * choose the format that omits unused fields */
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
	/* Fallback when no table entry matched: dump raw element data */
	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2184 * ipr_log64_path_elem - Log a fabric path element.
2185 * @hostrcb: hostrcb struct
2186 * @cfg: fabric path element struct
 *
 * SIS64 variant of ipr_log_path_elem(): identifies the element by its
 * resource path string rather than cascade/phy numbers. Elements whose
 * descriptor id is not IPR_DESCRIPTOR_SIS64 are ignored.
2191 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2192 struct ipr_hostrcb64_config_element *cfg)
2195 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2196 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2197 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2198 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Skip non-existent elements and non-SIS64 descriptors */
2200 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
/* Look up printable type and status descriptions */
2203 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2204 if (path_type_desc[i].type != type)
2207 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2208 if (path_status_desc[j].status != status)
2211 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2212 path_status_desc[j].desc, path_type_desc[i].desc,
2213 ipr_format_res_path(hostrcb->ioa_cfg,
2214 cfg->res_path, buffer, sizeof(buffer)),
2215 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2216 be32_to_cpu(cfg->wwid[0]),
2217 be32_to_cpu(cfg->wwid[1]));
/* Type or status not found in the tables: log the raw element */
2221 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2222 "WWN=%08X%08X\n", cfg->type_status,
2223 ipr_format_res_path(hostrcb->ioa_cfg,
2224 cfg->res_path, buffer, sizeof(buffer)),
2225 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2226 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2230 * ipr_log_fabric_error - Log a fabric error.
2231 * @ioa_cfg: ioa config struct
2232 * @hostrcb: hostrcb struct
 *
 * Logs the failure reason, then walks the variable-length list of
 * fabric descriptors embedded in the hostrcb, logging each path and its
 * config elements. Whatever bytes remain after the descriptors are hex
 * dumped.
2237 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2238 struct ipr_hostrcb *hostrcb)
2240 struct ipr_hostrcb_type_20_error *error;
2241 struct ipr_hostrcb_fabric_desc *fabric;
2242 struct ipr_hostrcb_config_element *cfg;
2245 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Force NUL termination before printing the adapter-supplied string */
2246 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2247 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Bytes of additional data following the fixed part of the overlay */
2249 add_len = be32_to_cpu(hostrcb->hcam.length) -
2250 (offsetof(struct ipr_hostrcb_error, u) +
2251 offsetof(struct ipr_hostrcb_type_20_error, desc));
2253 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2254 ipr_log_fabric_path(hostrcb, fabric);
2255 for_each_fabric_cfg(fabric, cfg)
2256 ipr_log_path_elem(hostrcb, cfg);
/* Advance by this descriptor's self-described length */
2258 add_len -= be16_to_cpu(fabric->length);
2259 fabric = (struct ipr_hostrcb_fabric_desc *)
2260 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Hex dump any trailing data past the last descriptor */
2263 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2267 * ipr_log_sis64_array_error - Log a sis64 array error.
2268 * @ioa_cfg: ioa config struct
2269 * @hostrcb: hostrcb struct
 *
 * Logs the RAID array configuration followed by each array member's
 * VPD and current/expected resource paths. Members whose serial number
 * is all '0' characters are treated as empty slots and skipped.
2274 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2275 struct ipr_hostrcb *hostrcb)
2278 struct ipr_hostrcb_type_24_error *error;
2279 struct ipr_hostrcb64_array_data_entry *array_entry;
2280 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Sentinel: an unused member slot has a serial number of all '0's */
2281 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2283 error = &hostrcb->hcam.u.error64.u.type_24_error;
2287 ipr_err("RAID %s Array Configuration: %s\n",
2288 error->protection_level,
2289 ipr_format_res_path(ioa_cfg, error->last_res_path,
2290 buffer, sizeof(buffer)));
2294 array_entry = error->array_member;
/* Don't trust the adapter's count beyond the array we actually have */
2295 num_entries = min_t(u32, error->num_entries,
2296 ARRAY_SIZE(error->array_member));
2298 for (i = 0; i < num_entries; i++, array_entry++) {
/* Skip empty member slots */
2300 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2303 if (error->exposed_mode_adn == i)
2304 ipr_err("Exposed Array Member %d:\n", i);
2306 ipr_err("Array Member %d:\n", i);
2308 ipr_err("Array Member %d:\n", i);
2309 ipr_log_ext_vpd(&array_entry->vpd);
2310 ipr_err("Current Location: %s\n",
2311 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2312 buffer, sizeof(buffer)));
2313 ipr_err("Expected Location: %s\n",
2314 ipr_format_res_path(ioa_cfg,
2315 array_entry->expected_res_path,
2316 buffer, sizeof(buffer)));
2323 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2324 * @ioa_cfg: ioa config struct
2325 * @hostrcb: hostrcb struct
 *
 * SIS64 counterpart of ipr_log_fabric_error(): logs the failure reason,
 * walks the variable-length 64-bit fabric descriptor list logging each
 * path and element, then hex dumps any trailing data.
2330 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2331 struct ipr_hostrcb *hostrcb)
2333 struct ipr_hostrcb_type_30_error *error;
2334 struct ipr_hostrcb64_fabric_desc *fabric;
2335 struct ipr_hostrcb64_config_element *cfg;
2338 error = &hostrcb->hcam.u.error64.u.type_30_error;
/* Force NUL termination before printing the adapter-supplied string */
2340 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2341 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* Bytes of additional data following the fixed part of the overlay */
2343 add_len = be32_to_cpu(hostrcb->hcam.length) -
2344 (offsetof(struct ipr_hostrcb64_error, u) +
2345 offsetof(struct ipr_hostrcb_type_30_error, desc));
2347 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2348 ipr_log64_fabric_path(hostrcb, fabric);
2349 for_each_fabric_cfg(fabric, cfg)
2350 ipr_log64_path_elem(hostrcb, cfg);
/* Advance by this descriptor's self-described length */
2352 add_len -= be16_to_cpu(fabric->length);
2353 fabric = (struct ipr_hostrcb64_fabric_desc *)
2354 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Hex dump any trailing data past the last descriptor */
2357 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2361 * ipr_log_generic_error - Log an adapter error.
2362 * @ioa_cfg: ioa config struct
2363 * @hostrcb: hostrcb struct
 *
 * Fallback logger for overlays with no dedicated decoder: hex dumps the
 * raw hostrcb payload.
2368 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2369 struct ipr_hostrcb *hostrcb)
2371 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2372 be32_to_cpu(hostrcb->hcam.length));
2376 * ipr_log_sis64_device_error - Log a cache error.
2377 * @ioa_cfg: ioa config struct
2378 * @hostrcb: hostrcb struct
 *
 * Logs a SIS64 type-21 (failing device) overlay: the device's WWN and
 * resource path, the primary/secondary problem descriptions, the SCSI
 * sense data and CDB, and any additional IOA data.
2383 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2384 struct ipr_hostrcb *hostrcb)
2386 struct ipr_hostrcb_type_21_error *error;
2387 char buffer[IPR_MAX_RES_PATH_LENGTH];
2389 error = &hostrcb->hcam.u.error64.u.type_21_error;
2391 ipr_err("-----Failing Device Information-----\n");
2392 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2393 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2394 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2395 ipr_err("Device Resource Path: %s\n",
2396 __ipr_format_res_path(error->res_path,
2397 buffer, sizeof(buffer)));
/* Force NUL termination before printing the adapter-supplied strings */
2398 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2399 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2400 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2401 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2402 ipr_err("SCSI Sense Data:\n");
2403 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2404 ipr_err("SCSI Command Descriptor Block: \n");
2405 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2407 ipr_err("Additional IOA Data:\n");
2408 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2412 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc: IOASC to look up (masked with IPR_IOASC_IOASC_MASK)
2415 * This function will return the index of into the ipr_error_table
2416 * for the specified IOASC. If the IOASC is not in the table,
2417 * 0 will be returned, which points to the entry used for unknown errors.
2420 * index into the ipr_error_table
2422 static u32 ipr_get_error(u32 ioasc)
2426 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2427 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2434 * ipr_handle_log_data - Log an adapter error.
2435 * @ioa_cfg: ioa config struct
2436 * @hostrcb: hostrcb struct
2438 * This function logs an adapter error to the system. It filters out
 * non-error notifications, notifies the SCSI midlayer of bus resets,
 * suppresses low-value errors based on the configured log level, and
 * dispatches to the overlay-specific logger.
2443 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2444 struct ipr_hostrcb *hostrcb)
2448 struct ipr_hostrcb_type_21_error *error;
/* Only error-log notifications are handled here */
2450 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2453 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2454 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
/* fd_ioasc lives at a different offset in the SIS64 vs SIS32 layout */
2457 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2459 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2461 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2462 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2463 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2464 scsi_report_bus_reset(ioa_cfg->host,
2465 hostrcb->hcam.u.error.fd_res_addr.bus);
2468 error_index = ipr_get_error(ioasc);
/* log_hcam == 0 means this IOASC should never be logged */
2470 if (!ipr_error_table[error_index].log_hcam)
/* Suppress expected illegal-request command failures at default log level */
2473 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2474 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2475 error = &hostrcb->hcam.u.error64.u.type_21_error;
/* Sense key is in byte 1 of the first sense data word */
2477 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2478 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2482 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2484 /* Set indication we have logged an error */
2485 ioa_cfg->errors_logged++;
2487 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp the length so the overlay loggers cannot read past u.raw */
2489 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2490 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
/* Dispatch to the logger matching this error's overlay format */
2492 switch (hostrcb->hcam.overlay_id) {
2493 case IPR_HOST_RCB_OVERLAY_ID_2:
2494 ipr_log_cache_error(ioa_cfg, hostrcb);
2496 case IPR_HOST_RCB_OVERLAY_ID_3:
2497 ipr_log_config_error(ioa_cfg, hostrcb);
2499 case IPR_HOST_RCB_OVERLAY_ID_4:
2500 case IPR_HOST_RCB_OVERLAY_ID_6:
2501 ipr_log_array_error(ioa_cfg, hostrcb);
2503 case IPR_HOST_RCB_OVERLAY_ID_7:
2504 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2506 case IPR_HOST_RCB_OVERLAY_ID_12:
2507 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2509 case IPR_HOST_RCB_OVERLAY_ID_13:
2510 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2512 case IPR_HOST_RCB_OVERLAY_ID_14:
2513 case IPR_HOST_RCB_OVERLAY_ID_16:
2514 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2516 case IPR_HOST_RCB_OVERLAY_ID_17:
2517 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2519 case IPR_HOST_RCB_OVERLAY_ID_20:
2520 ipr_log_fabric_error(ioa_cfg, hostrcb);
2522 case IPR_HOST_RCB_OVERLAY_ID_21:
2523 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2525 case IPR_HOST_RCB_OVERLAY_ID_23:
2526 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2528 case IPR_HOST_RCB_OVERLAY_ID_24:
2529 case IPR_HOST_RCB_OVERLAY_ID_26:
2530 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2532 case IPR_HOST_RCB_OVERLAY_ID_30:
2533 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2535 case IPR_HOST_RCB_OVERLAY_ID_1:
2536 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
/* Unknown/default overlays get a raw hex dump */
2538 ipr_log_generic_error(ioa_cfg, hostrcb);
2544 * ipr_process_error - Op done function for an adapter error log.
2545 * @ipr_cmd: ipr command struct
2547 * This function is the op done function for an error log host
2548 * controlled async from the adapter. It will log the error and
2549 * send the HCAM back to the adapter. If the error indicates the
 * adapter needs a reset, an abbreviated IOA reset is initiated.
2554 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2556 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2557 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2558 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* fd_ioasc lives at a different offset in the SIS64 vs SIS32 layout */
2562 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2564 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
/* Retire this hostrcb and return the command to the free queue */
2566 list_del(&hostrcb->queue);
2567 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2570 ipr_handle_log_data(ioa_cfg, hostrcb);
2571 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2572 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2573 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2574 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2575 dev_err(&ioa_cfg->pdev->dev,
2576 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Re-arm the HCAM so the adapter can report the next error */
2579 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2583 * ipr_timeout - An internally generated op has timed out.
2584 * @ipr_cmd: ipr command struct
2586 * This function blocks host requests and initiates an
 * adapter reset. Runs with the host lock held for the duration.
2592 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2594 unsigned long lock_flags = 0;
2595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2600 ioa_cfg->errors_logged++;
2601 dev_err(&ioa_cfg->pdev->dev,
2602 "Adapter being reset due to command timeout.\n");
/* If a dump was pending, escalate so the reset path collects it */
2604 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2605 ioa_cfg->sdt_state = GET_DUMP;
/* Only reset if no reset is in progress, or we own the in-flight reset */
2607 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2608 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2615 * ipr_oper_timeout - Adapter timed out transitioning to operational
2616 * @ipr_cmd: ipr command struct
2618 * This function blocks host requests and initiates an
 * adapter reset. Unlike ipr_timeout(), it also burns the remaining
 * reset/reload retries so a wedged adapter cannot loop forever.
2624 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2626 unsigned long lock_flags = 0;
2627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2632 ioa_cfg->errors_logged++;
2633 dev_err(&ioa_cfg->pdev->dev,
2634 "Adapter timed out transitioning to operational.\n");
/* If a dump was pending, escalate so the reset path collects it */
2636 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2637 ioa_cfg->sdt_state = GET_DUMP;
2639 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* Consume all remaining retries: this failure mode isn't retryable */
2641 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2642 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2650 * ipr_find_ses_entry - Find matching SES in SES table
2651 * @res: resource entry struct of SES
 *
 * Scans ipr_ses_table comparing the resource's inquiry product id
 * byte-by-byte against each table entry. A match requires all
 * IPR_PROD_ID_LEN bytes to match.
2654 * pointer to SES table entry / NULL on failure
2656 static const struct ipr_ses_table_entry *
2657 ipr_find_ses_entry(struct ipr_resource_entry *res)
2660 struct ipr_std_inq_vpids *vpids;
2661 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2663 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2664 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
/* 'X' in compare_product_id_byte requires an exact byte match;
 * NOTE(review): the non-'X' branch is outside this view — presumably
 * it counts as a wildcard match. Verify against the full source. */
2665 if (ste->compare_product_id_byte[j] == 'X') {
2666 vpids = &res->std_inq_data.vpids;
2667 if (vpids->product_id[j] == ste->product_id[j])
/* All bytes matched: this is our SES entry */
2675 if (matches == IPR_PROD_ID_LEN)
2683 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2684 * @ioa_cfg: ioa config struct
 * @bus: SCSI bus number to evaluate
2686 * @bus_width: bus width
2689 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2690 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2691 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2692 * max 160MHz = max 320MB/sec).
2694 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2696 struct ipr_resource_entry *res;
2697 const struct ipr_ses_table_entry *ste;
/* Start at the theoretical max and clamp down per attached SES */
2698 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2700 /* Loop through each config table entry in the config table buffer */
2701 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES (enclosure) devices limit the bus speed */
2702 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2705 if (bus != res->bus)
2708 if (!(ste = ipr_find_ses_entry(res)))
/* Convert the table's MHz-based limit for this bus width */
2711 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2714 return max_xfer_rate;
2718 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2719 * @ioa_cfg: ioa config struct
2720 * @max_delay: max delay in micro-seconds to wait
2722 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 * Polls the sense interrupt register until the ACK bit is set or the
 * cumulative delay exceeds @max_delay.
2725 * 0 on success / other on failure
2727 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2729 volatile u32 pcii_reg;
2732 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2733 while (delay < max_delay) {
2734 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2736 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2739 /* udelay cannot be used if delay is more than a few milliseconds */
2740 if ((delay / 1000) > MAX_UDELAY_MS)
2741 mdelay(delay / 1000);
2751 * ipr_get_sis64_dump_data_section - Dump IOA memory
2752 * @ioa_cfg: ioa config struct
2753 * @start_addr: adapter address to dump
2754 * @dest: destination kernel buffer
2755 * @length_in_words: length to dump in 4 byte words
 *
 * SIS64 dump path: for each word, write the adapter address to the dump
 * address register and read the word back through the dump data register.
2760 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2762 __be32 *dest, u32 length_in_words)
2766 for (i = 0; i < length_in_words; i++) {
2767 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
/* Store in big-endian to match the dump file format */
2768 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2776 * ipr_get_ldump_data_section - Dump IOA memory
2777 * @ioa_cfg: ioa config struct
2778 * @start_addr: adapter address to dump
2779 * @dest: destination kernel buffer
2780 * @length_in_words: length to dump in 4 byte words
 *
 * Implements the LDUMP handshake protocol with the adapter: each word is
 * transferred through the mailbox register, paced by IO debug
 * acknowledge/clear interrupt writes. SIS64 adapters use the simpler
 * register-pair path instead.
2783 * 0 on success / -EIO on failure
2785 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2787 __be32 *dest, u32 length_in_words)
2789 volatile u32 temp_pcii_reg;
/* SIS64 has dedicated dump registers; no handshake needed */
2793 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2794 dest, length_in_words);
2796 /* Write IOA interrupt reg starting LDUMP state */
2797 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2798 ioa_cfg->regs.set_uproc_interrupt_reg32);
2800 /* Wait for IO debug acknowledge */
2801 if (ipr_wait_iodbg_ack(ioa_cfg,
2802 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2803 dev_err(&ioa_cfg->pdev->dev,
2804 "IOA dump long data transfer timeout\n");
2808 /* Signal LDUMP interlocked - clear IO debug ack */
2809 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2810 ioa_cfg->regs.clr_interrupt_reg);
2812 /* Write Mailbox with starting address */
2813 writel(start_addr, ioa_cfg->ioa_mailbox);
2815 /* Signal address valid - clear IOA Reset alert */
2816 writel(IPR_UPROCI_RESET_ALERT,
2817 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2819 for (i = 0; i < length_in_words; i++) {
2820 /* Wait for IO debug acknowledge */
2821 if (ipr_wait_iodbg_ack(ioa_cfg,
2822 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2823 dev_err(&ioa_cfg->pdev->dev,
2824 "IOA dump short data transfer timeout\n");
2828 /* Read data from mailbox and increment destination pointer */
2829 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2832 /* For all but the last word of data, signal data received */
2833 if (i < (length_in_words - 1)) {
2834 /* Signal dump data received - Clear IO debug Ack */
2835 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2836 ioa_cfg->regs.clr_interrupt_reg);
2840 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2841 writel(IPR_UPROCI_RESET_ALERT,
2842 ioa_cfg->regs.set_uproc_interrupt_reg32);
2844 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2845 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2847 /* Signal dump data received - Clear IO debug Ack */
2848 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2849 ioa_cfg->regs.clr_interrupt_reg);
2851 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2852 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2854 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2856 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2866 #ifdef CONFIG_SCSI_IPR_DUMP
2868 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2869 * @ioa_cfg: ioa config struct
2870 * @pci_address: adapter address
2871 * @length: length of data to copy
2873 * Copy data from PCI adapter to kernel buffer.
2874 * Note: length MUST be a 4 byte multiple
 * Copies into page-sized chunks tracked in ioa_dump->ioa_data[],
 * allocating new pages as needed, and stops early on allocation
 * failure, dump abort, or reaching the per-format dump size cap.
2876 * 0 on success / other on failure
2878 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2879 unsigned long pci_address, u32 length)
2881 int bytes_copied = 0;
2882 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2884 unsigned long lock_flags = 0;
2885 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
/* Dump size cap differs between SDT formats */
2888 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2890 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2892 while (bytes_copied < length &&
2893 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
/* Current page is full (or none allocated yet): grab a fresh page */
2894 if (ioa_dump->page_offset >= PAGE_SIZE ||
2895 ioa_dump->page_offset == 0) {
2896 page = (__be32 *)__get_free_page(GFP_ATOMIC);
/* Out of memory: return what we managed to copy so far */
2900 return bytes_copied;
2903 ioa_dump->page_offset = 0;
2904 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2905 ioa_dump->next_page_index++;
2907 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy at most the rest of the request or the rest of the page */
2909 rem_len = length - bytes_copied;
2910 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2911 cur_len = min(rem_len, rem_page_len);
2913 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out if the dump was aborted while we were copying */
2914 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2917 rc = ipr_get_ldump_data_section(ioa_cfg,
2918 pci_address + bytes_copied,
2919 &page[ioa_dump->page_offset / 4],
2920 (cur_len / sizeof(u32)));
2922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2925 ioa_dump->page_offset += cur_len;
2926 bytes_copied += cur_len;
2934 return bytes_copied;
2938 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2939 * @hdr: dump entry header struct
 *
 * Fills in the fields common to every dump entry: eye catcher,
 * data offset (immediately past the header), and success status.
2944 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2946 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2948 hdr->offset = sizeof(*hdr);
2949 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2953 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2954 * @ioa_cfg: ioa config struct
2955 * @driver_dump: driver dump struct
 *
 * Records the adapter type and a firmware version word packed from the
 * page 3 inquiry VPD (major release, card type, two minor release bytes).
2960 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2961 struct ipr_driver_dump *driver_dump)
2963 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2965 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2966 driver_dump->ioa_type_entry.hdr.len =
2967 sizeof(struct ipr_dump_ioa_type_entry) -
2968 sizeof(struct ipr_dump_entry_header);
2969 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2970 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2971 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* Pack fw version as major | card type | minor[0] | minor[1] */
2972 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2973 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2974 ucode_vpd->minor_release[1];
2975 driver_dump->hdr.num_entries++;
2979 * ipr_dump_version_data - Fill in the driver version in the dump.
2980 * @ioa_cfg: ioa config struct
2981 * @driver_dump: driver dump struct
 *
 * Adds an ASCII entry containing IPR_DRIVER_VERSION to the driver dump.
2986 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2987 struct ipr_driver_dump *driver_dump)
2989 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2990 driver_dump->version_entry.hdr.len =
2991 sizeof(struct ipr_dump_version_entry) -
2992 sizeof(struct ipr_dump_entry_header);
2993 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2994 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2995 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2996 driver_dump->hdr.num_entries++;
3000 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3001 * @ioa_cfg: ioa config struct
3002 * @driver_dump: driver dump struct
 *
 * Adds a binary entry containing a snapshot of the driver's trace buffer.
3007 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3008 struct ipr_driver_dump *driver_dump)
3010 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3011 driver_dump->trace_entry.hdr.len =
3012 sizeof(struct ipr_dump_trace_entry) -
3013 sizeof(struct ipr_dump_entry_header);
3014 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3015 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3016 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3017 driver_dump->hdr.num_entries++;
3021 * ipr_dump_location_data - Fill in the IOA location in the dump.
3022 * @ioa_cfg: ioa config struct
3023 * @driver_dump: driver dump struct
 *
 * Adds an ASCII entry containing the PCI device name of the adapter.
3028 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3029 struct ipr_driver_dump *driver_dump)
3031 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3032 driver_dump->location_entry.hdr.len =
3033 sizeof(struct ipr_dump_location_entry) -
3034 sizeof(struct ipr_dump_entry_header);
3035 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3036 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3037 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3038 driver_dump->hdr.num_entries++;
3042 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3043 * @ioa_cfg: ioa config struct
3044 * @dump: dump struct
 *
 * Builds the driver dump header entries, fetches the Smart Dump Table
 * (SDT) from the adapter, then copies each valid SDT section into the
 * dump buffers. Drops and re-takes the host lock around the long copy
 * phase. Sets sdt_state to DUMP_OBTAINED on completion.
3049 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3051 unsigned long start_addr, sdt_word;
3052 unsigned long lock_flags = 0;
3053 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3054 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3055 u32 num_entries, max_num_entries, start_off, end_off;
3056 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3057 struct ipr_sdt *sdt;
3063 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only proceed if a dump was actually requested */
3065 if (ioa_cfg->sdt_state != READ_DUMP) {
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Give a SIS64 adapter time to get its dump ready before reading */
3070 if (ioa_cfg->sis64) {
3071 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072 ssleep(IPR_DUMP_DELAY_SECONDS);
3073 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Mailbox holds the adapter address of the SDT */
3076 start_addr = readl(ioa_cfg->ioa_mailbox);
3078 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3079 dev_err(&ioa_cfg->pdev->dev,
3080 "Invalid dump table format: %lx\n", start_addr);
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3087 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3089 /* Initialize the overall dump header */
3090 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3091 driver_dump->hdr.num_entries = 1;
3092 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3093 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3094 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3095 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3097 ipr_dump_version_data(ioa_cfg, driver_dump);
3098 ipr_dump_location_data(ioa_cfg, driver_dump);
3099 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3100 ipr_dump_trace_data(ioa_cfg, driver_dump);
3102 /* Update dump_header */
3103 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3105 /* IOA Dump entry */
3106 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3107 ioa_dump->hdr.len = 0;
3108 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3109 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3111 /* First entries in sdt are actually a list of dump addresses and
3112 lengths to gather the real dump data. sdt represents the pointer
3113 to the ioa generated dump table. Dump data will be extracted based
3114 on entries in this table */
3115 sdt = &ioa_dump->sdt;
/* Entry count and size caps differ between SDT formats */
3117 if (ioa_cfg->sis64) {
3118 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3119 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3121 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3122 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3125 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3126 (max_num_entries * sizeof(struct ipr_sdt_entry));
3127 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3128 bytes_to_copy / sizeof(__be32));
3130 /* Smart Dump table is ready to use and the first entry is valid */
3131 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3132 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3133 dev_err(&ioa_cfg->pdev->dev,
3134 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3135 rc, be32_to_cpu(sdt->hdr.state));
3136 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3137 ioa_cfg->sdt_state = DUMP_OBTAINED;
3138 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Don't trust the adapter's count beyond the table we read */
3144 if (num_entries > max_num_entries)
3145 num_entries = max_num_entries;
3147 /* Update dump length to the actual data to be copied */
3148 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3150 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3152 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
/* Long copy phase: run without the host lock held */
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3156 for (i = 0; i < num_entries; i++) {
/* Stop once the accumulated dump exceeds the format's size cap */
3157 if (ioa_dump->hdr.len > max_dump_size) {
3158 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3162 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3163 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3165 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
/* Non-SIS64: tokens are fmt2 start/end offsets */
3167 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3168 end_off = be32_to_cpu(sdt->entry[i].end_token);
3170 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3171 bytes_to_copy = end_off - start_off;
/* Skip oversized sections and mark the entry invalid */
3176 if (bytes_to_copy > max_dump_size) {
3177 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3181 /* Copy data from adapter to driver buffers */
3182 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3185 ioa_dump->hdr.len += bytes_copied;
/* Short copy: record partial success and stop */
3187 if (bytes_copied != bytes_to_copy) {
3188 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3195 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3197 /* Update dump_header */
3198 driver_dump->hdr.len += ioa_dump->hdr.len;
3200 ioa_cfg->sdt_state = DUMP_OBTAINED;
3205 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3209 * ipr_release_dump - Free adapter dump memory
3210 * @kref: kref struct
 *
 * kref release callback: detaches the dump from the ioa_cfg under the
 * host lock, then frees every collected dump page and the page-pointer
 * array.
3215 static void ipr_release_dump(struct kref *kref)
3217 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3218 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3219 unsigned long lock_flags = 0;
3223 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224 ioa_cfg->dump = NULL;
3225 ioa_cfg->sdt_state = INACTIVE;
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Free every page collected by ipr_sdt_copy() */
3228 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3229 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3231 vfree(dump->ioa_dump.ioa_data);
3237 * ipr_worker_thread - Worker thread
3238 * @work: ioa config struct
3240 * Called at task level from a work thread. This function takes care
3241 * of adding and removing device from the mid-layer as configuration
3242 * changes are detected by the adapter. It also drives dump collection
 * when sdt_state indicates a dump is pending. The host lock is dropped
 * around midlayer add/remove calls and the dump, which can sleep.
3247 static void ipr_worker_thread(struct work_struct *work)
3249 unsigned long lock_flags;
3250 struct ipr_resource_entry *res;
3251 struct scsi_device *sdev;
3252 struct ipr_dump *dump;
3253 struct ipr_ioa_cfg *ioa_cfg =
3254 container_of(work, struct ipr_ioa_cfg, work_q);
3255 u8 bus, target, lun;
3259 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Dump requested: collect it first, then possibly reset the adapter */
3261 if (ioa_cfg->sdt_state == READ_DUMP) {
3262 dump = ioa_cfg->dump;
3264 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a reference so the dump can't be freed while we copy it */
3267 kref_get(&dump->kref);
3268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269 ipr_get_ioa_dump(ioa_cfg, dump);
3270 kref_put(&dump->kref, ipr_release_dump);
3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3274 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Don't touch the midlayer while the adapter can't take commands */
3282 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* First pass: remove devices the adapter reported as gone */
3287 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3288 if (res->del_from_ml && res->sdev) {
3291 if (!scsi_device_get(sdev)) {
3292 if (!res->add_to_ml)
3293 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3295 res->del_from_ml = 0;
/* scsi_remove_device() can sleep: drop the lock around it */
3296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297 scsi_remove_device(sdev);
3298 scsi_device_put(sdev);
3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Second pass: register newly reported devices with the midlayer */
3306 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3307 if (res->add_to_ml) {
3309 target = res->target;
/* scsi_add_device() can sleep: drop the lock around it */
3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3313 scsi_add_device(ioa_cfg->host, bus, target, lun);
3314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Notify userspace that the device configuration changed */
3319 ioa_cfg->scan_done = 1;
3320 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3321 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3325 #ifdef CONFIG_SCSI_IPR_TRACE
3327 * ipr_read_trace - Dump the adapter trace
3328 * @filp: open sysfs file
3329 * @kobj: kobject struct
3330 * @bin_attr: bin_attribute struct
3333 * @count: buffer size
3336 * number of bytes printed to buffer
/* sysfs binary read handler: copies the in-memory adapter trace buffer
 * to userspace under the host lock. */
3338 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3339 struct bin_attribute *bin_attr,
3340 char *buf, loff_t off, size_t count)
3342 struct device *dev = container_of(kobj, struct device, kobj);
3343 struct Scsi_Host *shost = class_to_shost(dev);
3344 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3345 unsigned long lock_flags = 0;
3348 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3349 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs binary attribute exposing the trace buffer (read-only). */
3356 static struct bin_attribute ipr_trace_attr = {
3362 .read = ipr_read_trace,
3367 * ipr_show_fw_version - Show the firmware version
3368 * @dev: class device struct
3372 * number of bytes printed to buffer
/* sysfs show handler: formats the microcode VPD (major release, card type,
 * two minor-release bytes) as an 8-hex-digit string. */
3374 static ssize_t ipr_show_fw_version(struct device *dev,
3375 struct device_attribute *attr, char *buf)
3377 struct Scsi_Host *shost = class_to_shost(dev);
3378 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3379 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3380 unsigned long lock_flags = 0;
3383 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3384 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3385 ucode_vpd->major_release, ucode_vpd->card_type,
3386 ucode_vpd->minor_release[0],
3387 ucode_vpd->minor_release[1]);
3388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute "fw_version" (read-only). */
3392 static struct device_attribute ipr_fw_version_attr = {
3394 .name = "fw_version",
3397 .show = ipr_show_fw_version,
3401 * ipr_show_log_level - Show the adapter's error logging level
3402 * @dev: class device struct
3406 * number of bytes printed to buffer
3408 static ssize_t ipr_show_log_level(struct device *dev,
3409 struct device_attribute *attr, char *buf)
3411 struct Scsi_Host *shost = class_to_shost(dev);
3412 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3413 unsigned long lock_flags = 0;
3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3423 * ipr_store_log_level - Change the adapter's error logging level
3424 * @dev: class device struct
3428 * number of bytes printed to buffer
/* NOTE(review): parses with simple_strtoul and accepts any numeric prefix;
 * no range validation is visible in this extract. */
3430 static ssize_t ipr_store_log_level(struct device *dev,
3431 struct device_attribute *attr,
3432 const char *buf, size_t count)
3434 struct Scsi_Host *shost = class_to_shost(dev);
3435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436 unsigned long lock_flags = 0;
3438 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3439 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3440 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute "log_level" (read/write, owner-writable). */
3444 static struct device_attribute ipr_log_level_attr = {
3446 .name = "log_level",
3447 .mode = S_IRUGO | S_IWUSR,
3449 .show = ipr_show_log_level,
3450 .store = ipr_store_log_level
3454 * ipr_store_diagnostics - IOA Diagnostics interface
3455 * @dev: device struct
3457 * @count: buffer size
3459 * This function will reset the adapter and wait a reasonable
3460 * amount of time for any errors that the adapter might log.
3463 * count on success / other on failure
3465 static ssize_t ipr_store_diagnostics(struct device *dev,
3466 struct device_attribute *attr,
3467 const char *buf, size_t count)
3469 struct Scsi_Host *shost = class_to_shost(dev);
3470 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3471 unsigned long lock_flags = 0;
/* Privileged operation: requires CAP_SYS_ADMIN. */
3474 if (!capable(CAP_SYS_ADMIN))
/* Wait for any in-flight reset/reload to finish before starting ours. */
3477 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3478 while (ioa_cfg->in_reset_reload) {
3479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3480 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Clear the error counter, then reset with a normal shutdown so errors
 * logged during bring-up can be counted afterwards. */
3484 ioa_cfg->errors_logged = 0;
3485 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3487 if (ioa_cfg->in_reset_reload) {
3488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3491 /* Wait for a second for any errors to be logged */
3494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* If errors were logged (or another reset started), fail the write. */
3499 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* sysfs attribute "run_diagnostics" (write-only trigger). */
3506 static struct device_attribute ipr_diagnostics_attr = {
3508 .name = "run_diagnostics",
3511 .store = ipr_store_diagnostics
3515 * ipr_show_adapter_state - Show the adapter's state
3516 * @class_dev: device struct
3520 * number of bytes printed to buffer
/* Reports "offline" when the initial HRRQ is marked dead, else "online". */
3522 static ssize_t ipr_show_adapter_state(struct device *dev,
3523 struct device_attribute *attr, char *buf)
3525 struct Scsi_Host *shost = class_to_shost(dev);
3526 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3527 unsigned long lock_flags = 0;
3530 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3531 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3532 len = snprintf(buf, PAGE_SIZE, "offline\n");
3534 len = snprintf(buf, PAGE_SIZE, "online\n");
3535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3540 * ipr_store_adapter_state - Change adapter state
3541 * @dev: device struct
3543 * @count: buffer size
3545 * This function will change the adapter's state.
3548 * count on success / other on failure
3550 static ssize_t ipr_store_adapter_state(struct device *dev,
3551 struct device_attribute *attr,
3552 const char *buf, size_t count)
3554 struct Scsi_Host *shost = class_to_shost(dev);
3555 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3556 unsigned long lock_flags;
3557 int result = count, i;
3559 if (!capable(CAP_SYS_ADMIN))
/* Writing "online" to a dead adapter revives every HRRQ (per-queue _lock
 * taken under the host lock) and kicks off a fresh adapter reset. */
3562 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3563 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3564 !strncmp(buf, "online", 6)) {
3565 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3566 spin_lock(&ioa_cfg->hrrq[i]._lock);
3567 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3568 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3571 ioa_cfg->reset_retries = 0;
3572 ioa_cfg->in_ioa_bringdown = 0;
3573 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3576 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute "online_state" (read/write, owner-writable). */
3581 static struct device_attribute ipr_ioa_state_attr = {
3583 .name = "online_state",
3584 .mode = S_IRUGO | S_IWUSR,
3586 .show = ipr_show_adapter_state,
3587 .store = ipr_store_adapter_state
3591 * ipr_store_reset_adapter - Reset the adapter
3592 * @dev: device struct
3594 * @count: buffer size
3596 * This function will reset the adapter.
3599 * count on success / other on failure
3601 static ssize_t ipr_store_reset_adapter(struct device *dev,
3602 struct device_attribute *attr,
3603 const char *buf, size_t count)
3605 struct Scsi_Host *shost = class_to_shost(dev);
3606 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3607 unsigned long lock_flags;
3610 if (!capable(CAP_SYS_ADMIN))
/* Only start a reset if one is not already in progress, then block the
 * writer until the reset/reload completes. */
3613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3614 if (!ioa_cfg->in_reset_reload)
3615 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3617 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* sysfs attribute "reset_host" (write-only trigger). */
3622 static struct device_attribute ipr_ioa_reset_attr = {
3624 .name = "reset_host",
3627 .store = ipr_store_reset_adapter
/* Forward declaration: blk-iopoll callback defined later in the file. */
3630 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3632 * ipr_show_iopoll_weight - Show ipr polling mode
3633 * @dev: class device struct
3637 * number of bytes printed to buffer
3639 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3640 struct device_attribute *attr, char *buf)
3642 struct Scsi_Host *shost = class_to_shost(dev);
3643 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3644 unsigned long lock_flags = 0;
3647 spin_lock_irqsave(shost->host_lock, lock_flags);
3648 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3649 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3655 * ipr_store_iopoll_weight - Change the adapter's polling mode
3656 * @dev: class device struct
3660 * number of bytes printed to buffer
/* Only supported on SIS-64 adapters; weight is validated (< 256, different
 * from current) before the per-HRRQ iopoll contexts are torn down and
 * re-initialized with the new weight. */
3662 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3663 struct device_attribute *attr,
3664 const char *buf, size_t count)
3666 struct Scsi_Host *shost = class_to_shost(dev);
3667 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3668 unsigned long user_iopoll_weight;
3669 unsigned long lock_flags = 0;
3672 if (!ioa_cfg->sis64) {
3673 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3676 if (kstrtoul(buf, 10, &user_iopoll_weight))
3679 if (user_iopoll_weight > 256) {
3680 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
3684 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3685 dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
/* Disable polling on HRRQs 1..n (HRRQ 0 is never polled) before changing. */
3689 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3690 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3691 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3694 spin_lock_irqsave(shost->host_lock, lock_flags);
3695 ioa_cfg->iopoll_weight = user_iopoll_weight;
3696 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3697 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3698 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3699 ioa_cfg->iopoll_weight, ipr_iopoll);
3700 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3703 spin_unlock_irqrestore(shost->host_lock, lock_flags);
/* sysfs attribute "iopoll_weight" (read/write, owner-writable). */
3708 static struct device_attribute ipr_iopoll_weight_attr = {
3710 .name = "iopoll_weight",
3711 .mode = S_IRUGO | S_IWUSR,
3713 .show = ipr_show_iopoll_weight,
3714 .store = ipr_store_iopoll_weight
3718 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3719 * @buf_len: buffer length
3721 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3722 * list to use for microcode download
3725 * pointer to sglist / NULL on failure
3727 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3729 int sg_size, order, bsize_elem, num_elem, i, j;
3730 struct ipr_sglist *sglist;
3731 struct scatterlist *scatterlist;
3734 /* Get the minimum size per scatter/gather element */
3735 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3737 /* Get the actual size per element */
3738 order = get_order(sg_size);
3740 /* Determine the actual number of bytes per element */
3741 bsize_elem = PAGE_SIZE * (1 << order);
3743 /* Determine the actual number of sg entries needed */
3744 if (buf_len % bsize_elem)
3745 num_elem = (buf_len / bsize_elem) + 1;
3747 num_elem = buf_len / bsize_elem;
3749 /* Allocate a scatter/gather list for the DMA */
/* ipr_sglist embeds one scatterlist entry, hence the (num_elem - 1). */
3750 sglist = kzalloc(sizeof(struct ipr_sglist) +
3751 (sizeof(struct scatterlist) * (num_elem - 1)),
3754 if (sglist == NULL) {
3759 scatterlist = sglist->scatterlist;
3760 sg_init_table(scatterlist, num_elem);
3762 sglist->order = order;
3763 sglist->num_sg = num_elem;
3765 /* Allocate a bunch of sg elements */
3766 for (i = 0; i < num_elem; i++) {
3767 page = alloc_pages(GFP_KERNEL, order);
3771 /* Free up what we already allocated */
/* On allocation failure, unwind pages allocated so far to avoid a leak. */
3772 for (j = i - 1; j >= 0; j--)
3773 __free_pages(sg_page(&scatterlist[j]), order);
3778 sg_set_page(&scatterlist[i], page, 0, 0);
3785 * ipr_free_ucode_buffer - Frees a microcode download buffer
3786 * @p_dnld: scatter/gather list pointer
3788 * Free a DMA'able ucode download buffer previously allocated with
3789 * ipr_alloc_ucode_buffer
/* Frees every page of the sg list at the order recorded at allocation. */
3794 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3798 for (i = 0; i < sglist->num_sg; i++)
3799 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3805 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3806 * @sglist: scatter/gather list pointer
3807 * @buffer: buffer pointer
3808 * @len: buffer length
3810 * Copy a microcode image from a user buffer into a buffer allocated by
3811 * ipr_alloc_ucode_buffer
3814 * 0 on success / other on failure
3816 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3817 u8 *buffer, u32 len)
3819 int bsize_elem, i, result = 0;
3820 struct scatterlist *scatterlist;
3823 /* Determine the actual number of bytes per element */
3824 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3826 scatterlist = sglist->scatterlist;
/* Copy whole elements first, advancing the source by one element each
 * iteration and recording the per-entry length. */
3828 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3829 struct page *page = sg_page(&scatterlist[i]);
3832 memcpy(kaddr, buffer, bsize_elem);
3835 scatterlist[i].length = bsize_elem;
/* Copy the final partial element, if the image is not a multiple of
 * bsize_elem. */
3843 if (len % bsize_elem) {
3844 struct page *page = sg_page(&scatterlist[i]);
3847 memcpy(kaddr, buffer, len % bsize_elem);
3850 scatterlist[i].length = len % bsize_elem;
3853 sglist->buffer_len = len;
3858 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3859 * @ipr_cmd: ipr command struct
3860 * @sglist: scatter/gather list
3862 * Builds a microcode download IOA data list (IOADL).
/* 64-bit (SIS-64) variant: one ioadl64 descriptor per DMA-mapped sg entry,
 * marked as a write to the adapter; last descriptor gets the LAST flag. */
3865 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3866 struct ipr_sglist *sglist)
3868 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3869 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3870 struct scatterlist *scatterlist = sglist->scatterlist;
3873 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3874 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3875 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3878 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3879 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3880 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3881 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3882 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3885 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3889 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3890 * @ipr_cmd: ipr command struct
3891 * @sglist: scatter/gather list
3893 * Builds a microcode download IOA data list (IOADL).
/* 32-bit variant of ipr_build_ucode_ioadl64: flags and length share one
 * big-endian word per descriptor. */
3896 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3897 struct ipr_sglist *sglist)
3899 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3900 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3901 struct scatterlist *scatterlist = sglist->scatterlist;
3904 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3905 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3906 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3909 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3911 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3912 ioadl[i].flags_and_data_len =
3913 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3915 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3918 ioadl[i-1].flags_and_data_len |=
3919 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3923 * ipr_update_ioa_ucode - Update IOA's microcode
3924 * @ioa_cfg: ioa config struct
3925 * @sglist: scatter/gather list
3927 * Initiate an adapter reset to update the IOA's microcode
3930 * 0 on success / -EIO on failure
3932 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3933 struct ipr_sglist *sglist)
3935 unsigned long lock_flags;
/* Serialize against an in-progress reset/reload before starting. */
3937 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3938 while (ioa_cfg->in_reset_reload) {
3939 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3940 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3941 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one microcode download may be active at a time. */
3944 if (ioa_cfg->ucode_sglist) {
3945 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3946 dev_err(&ioa_cfg->pdev->dev,
3947 "Microcode download already in progress\n");
/* DMA-map the ucode buffer; failure aborts the download. */
3951 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3952 sglist->scatterlist, sglist->num_sg,
3955 if (!sglist->num_dma_sg) {
3956 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3957 dev_err(&ioa_cfg->pdev->dev,
3958 "Failed to map microcode download buffer!\n");
/* Publish the sglist for the reset path to consume, reset the adapter,
 * and wait for the reload to finish before clearing it. */
3962 ioa_cfg->ucode_sglist = sglist;
3963 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3964 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3965 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3967 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3968 ioa_cfg->ucode_sglist = NULL;
3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3974 * ipr_store_update_fw - Update the firmware on the adapter
3975 * @class_dev: device struct
3977 * @count: buffer size
3979 * This function will update the firmware on the adapter.
3982 * count on success / other on failure
3984 static ssize_t ipr_store_update_fw(struct device *dev,
3985 struct device_attribute *attr,
3986 const char *buf, size_t count)
3988 struct Scsi_Host *shost = class_to_shost(dev);
3989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3990 struct ipr_ucode_image_header *image_hdr;
3991 const struct firmware *fw_entry;
3992 struct ipr_sglist *sglist;
3995 int len, result, dnld_size;
3997 if (!capable(CAP_SYS_ADMIN))
/* Copy the firmware filename from the sysfs buffer; the trailing byte
 * (presumably the newline from "echo") is replaced with a NUL. */
4000 len = snprintf(fname, 99, "%s", buf);
4001 fname[len-1] = '\0';
4003 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4004 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
/* Skip the image header: the payload starts header_length bytes in. */
4008 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4010 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4011 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4012 sglist = ipr_alloc_ucode_buffer(dnld_size);
4015 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4016 release_firmware(fw_entry);
4020 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4023 dev_err(&ioa_cfg->pdev->dev,
4024 "Microcode buffer copy to DMA buffer failed\n");
4028 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4030 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
/* Common cleanup: free the sg buffer and release the firmware image. */
4035 ipr_free_ucode_buffer(sglist);
4036 release_firmware(fw_entry);
/* sysfs attribute "update_fw" (write-only trigger). */
4040 static struct device_attribute ipr_update_fw_attr = {
4042 .name = "update_fw",
4045 .store = ipr_store_update_fw
4049 * ipr_show_fw_type - Show the adapter's firmware type.
4050 * @dev: class device struct
4054 * number of bytes printed to buffer
/* Reports the sis64 flag (1 = SIS-64 adapter, 0 = SIS-32). */
4056 static ssize_t ipr_show_fw_type(struct device *dev,
4057 struct device_attribute *attr, char *buf)
4059 struct Scsi_Host *shost = class_to_shost(dev);
4060 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4061 unsigned long lock_flags = 0;
4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4065 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4070 static struct device_attribute ipr_ioa_fw_type_attr = {
4075 .show = ipr_show_fw_type
/* NULL-terminated table of all host-level sysfs attributes, registered by
 * the SCSI mid-layer via the host template. */
4078 static struct device_attribute *ipr_ioa_attrs[] = {
4079 &ipr_fw_version_attr,
4080 &ipr_log_level_attr,
4081 &ipr_diagnostics_attr,
4082 &ipr_ioa_state_attr,
4083 &ipr_ioa_reset_attr,
4084 &ipr_update_fw_attr,
4085 &ipr_ioa_fw_type_attr,
4086 &ipr_iopoll_weight_attr,
4090 #ifdef CONFIG_SCSI_IPR_DUMP
4092 * ipr_read_dump - Dump the adapter
4093 * @filp: open sysfs file
4094 * @kobj: kobject struct
4095 * @bin_attr: bin_attribute struct
4098 * @count: buffer size
4101 * number of bytes printed to buffer
/*
 * The dump image read back through sysfs is laid out as three regions:
 * the driver_dump header struct, then the SDT portion of ioa_dump, then
 * the page array ioa_dump.ioa_data. The handler translates the file
 * offset into the matching region.
 */
4103 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4104 struct bin_attribute *bin_attr,
4105 char *buf, loff_t off, size_t count)
4107 struct device *cdev = container_of(kobj, struct device, kobj);
4108 struct Scsi_Host *shost = class_to_shost(cdev);
4109 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4110 struct ipr_dump *dump;
4111 unsigned long lock_flags = 0;
4116 if (!capable(CAP_SYS_ADMIN))
4119 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4120 dump = ioa_cfg->dump;
/* No data unless a dump has actually been obtained. */
4122 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4123 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a kref so the dump cannot be freed while copied out unlocked. */
4126 kref_get(&dump->kref);
4127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4129 if (off > dump->driver_dump.hdr.len) {
4130 kref_put(&dump->kref, ipr_release_dump);
/* Clamp the read so it never runs past the recorded dump length. */
4134 if (off + count > dump->driver_dump.hdr.len) {
4135 count = dump->driver_dump.hdr.len - off;
/* Region 1: the driver_dump header structure. */
4139 if (count && off < sizeof(dump->driver_dump)) {
4140 if (off + count > sizeof(dump->driver_dump))
4141 len = sizeof(dump->driver_dump) - off;
4144 src = (u8 *)&dump->driver_dump + off;
4145 memcpy(buf, src, len);
4151 off -= sizeof(dump->driver_dump);
/* Region 2: the SDT — its end depends on format (sis64 uses the actual
 * entry count, older format a fixed entry count). */
4154 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4155 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4156 sizeof(struct ipr_sdt_entry));
4158 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4159 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4161 if (count && off < sdt_end) {
4162 if (off + count > sdt_end)
4163 len = sdt_end - off;
4166 src = (u8 *)&dump->ioa_dump + off;
4167 memcpy(buf, src, len);
/* Region 3: IOA data pages — copy at most to the next page boundary per
 * iteration, indexing the page array by (off >> PAGE_SHIFT). */
4176 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4177 len = PAGE_ALIGN(off) - off;
4180 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4181 src += off & ~PAGE_MASK;
4182 memcpy(buf, src, len);
4188 kref_put(&dump->kref, ipr_release_dump);
4193 * ipr_alloc_dump - Prepare for adapter dump
4194 * @ioa_cfg: ioa config struct
4197 * 0 on success / other on failure
4199 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4201 struct ipr_dump *dump;
4203 unsigned long lock_flags = 0;
4205 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4208 ipr_err("Dump memory allocation failed\n");
/* Page-pointer array size depends on the SDT format in use. */
4213 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4215 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4218 ipr_err("Dump memory allocation failed\n");
4223 dump->ioa_dump.ioa_data = ioa_data;
4225 kref_init(&dump->kref);
4226 dump->ioa_cfg = ioa_cfg;
4228 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail out (freeing what we allocated) if a dump is already pending. */
4230 if (INACTIVE != ioa_cfg->sdt_state) {
4231 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4232 vfree(dump->ioa_dump.ioa_data);
/* Arm the dump; if the adapter is already dead, kick the worker thread
 * to take the dump immediately. */
4237 ioa_cfg->dump = dump;
4238 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4239 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4240 ioa_cfg->dump_taken = 1;
4241 schedule_work(&ioa_cfg->work_q);
4243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249 * ipr_free_dump - Free adapter dump memory
4250 * @ioa_cfg: ioa config struct
4253 * 0 on success / other on failure
4255 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4257 struct ipr_dump *dump;
4258 unsigned long lock_flags = 0;
4262 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4263 dump = ioa_cfg->dump;
4265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Detach the dump under the lock, then drop the last reference; the
 * actual memory is released by ipr_release_dump via the kref. */
4269 ioa_cfg->dump = NULL;
4270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4272 kref_put(&dump->kref, ipr_release_dump);
4279 * ipr_write_dump - Setup dump state of adapter
4280 * @filp: open sysfs file
4281 * @kobj: kobject struct
4282 * @bin_attr: bin_attribute struct
4285 * @count: buffer size
4288 * number of bytes printed to buffer
/* Writing '1' arms a dump, writing '0' frees it. */
4290 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4291 struct bin_attribute *bin_attr,
4292 char *buf, loff_t off, size_t count)
4294 struct device *cdev = container_of(kobj, struct device, kobj);
4295 struct Scsi_Host *shost = class_to_shost(cdev);
4296 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4299 if (!capable(CAP_SYS_ADMIN))
4303 rc = ipr_alloc_dump(ioa_cfg);
4304 else if (buf[0] == '0')
4305 rc = ipr_free_dump(ioa_cfg);
/* sysfs binary attribute "dump" (owner read/write). */
4315 static struct bin_attribute ipr_dump_attr = {
4318 .mode = S_IRUSR | S_IWUSR,
4321 .read = ipr_read_dump,
4322 .write = ipr_write_dump
/* Stub used when CONFIG_SCSI_IPR_DUMP is not set. */
4325 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4329 * ipr_change_queue_depth - Change the device's queue depth
4330 * @sdev: scsi device struct
4331 * @qdepth: depth to set
4332 * @reason: calling context
4337 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4339 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4340 struct ipr_resource_entry *res;
4341 unsigned long lock_flags = 0;
4343 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4344 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SATA (GATA) devices are capped at the per-ATA-LUN command limit. */
4346 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4347 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4348 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4350 scsi_change_queue_depth(sdev, qdepth);
4351 return sdev->queue_depth;
4355 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4356 * @dev: device struct
4357 * @attr: device attribute structure
4361 * number of bytes printed to buffer
/* Returns -ENXIO if the device has no resource entry. */
4363 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4365 struct scsi_device *sdev = to_scsi_device(dev);
4366 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4367 struct ipr_resource_entry *res;
4368 unsigned long lock_flags = 0;
4369 ssize_t len = -ENXIO;
4371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4372 res = (struct ipr_resource_entry *)sdev->hostdata;
4374 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4375 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* per-device sysfs attribute "adapter_handle" (read-only). */
4379 static struct device_attribute ipr_adapter_handle_attr = {
4381 .name = "adapter_handle",
4384 .show = ipr_show_adapter_handle
4388 * ipr_show_resource_path - Show the resource path or the resource address for
4390 * @dev: device struct
4391 * @attr: device attribute structure
4395 * number of bytes printed to buffer
/* SIS-64 adapters report a formatted resource path; older adapters report
 * the host:bus:target:lun address instead. */
4397 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4399 struct scsi_device *sdev = to_scsi_device(dev);
4400 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4401 struct ipr_resource_entry *res;
4402 unsigned long lock_flags = 0;
4403 ssize_t len = -ENXIO;
4404 char buffer[IPR_MAX_RES_PATH_LENGTH];
4406 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4407 res = (struct ipr_resource_entry *)sdev->hostdata;
4408 if (res && ioa_cfg->sis64)
4409 len = snprintf(buf, PAGE_SIZE, "%s\n",
4410 __ipr_format_res_path(res->res_path, buffer,
4413 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4414 res->bus, res->target, res->lun);
4416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* per-device sysfs attribute "resource_path" (read-only). */
4420 static struct device_attribute ipr_resource_path_attr = {
4422 .name = "resource_path",
4425 .show = ipr_show_resource_path
4429 * ipr_show_device_id - Show the device_id for this device.
4430 * @dev: device struct
4431 * @attr: device attribute structure
4435 * number of bytes printed to buffer
/* SIS-64 adapters expose dev_id; older adapters expose the LUN WWN. */
4437 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4439 struct scsi_device *sdev = to_scsi_device(dev);
4440 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4441 struct ipr_resource_entry *res;
4442 unsigned long lock_flags = 0;
4443 ssize_t len = -ENXIO;
4445 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4446 res = (struct ipr_resource_entry *)sdev->hostdata;
4447 if (res && ioa_cfg->sis64)
4448 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4450 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4452 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* per-device sysfs attribute "device_id" (read-only). */
4456 static struct device_attribute ipr_device_id_attr = {
4458 .name = "device_id",
4461 .show = ipr_show_device_id
4465 * ipr_show_resource_type - Show the resource type for this device.
4466 * @dev: device struct
4467 * @attr: device attribute structure
4471 * number of bytes printed to buffer
4473 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4475 struct scsi_device *sdev = to_scsi_device(dev);
4476 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4477 struct ipr_resource_entry *res;
4478 unsigned long lock_flags = 0;
4479 ssize_t len = -ENXIO;
4481 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4482 res = (struct ipr_resource_entry *)sdev->hostdata;
4485 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4487 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* per-device sysfs attribute "resource_type" (read-only). */
4491 static struct device_attribute ipr_resource_type_attr = {
4493 .name = "resource_type",
4496 .show = ipr_show_resource_type
/* NULL-terminated table of per-device sysfs attributes for the host
 * template. */
4499 static struct device_attribute *ipr_dev_attrs[] = {
4500 &ipr_adapter_handle_attr,
4501 &ipr_resource_path_attr,
4502 &ipr_device_id_attr,
4503 &ipr_resource_type_attr,
4508 * ipr_biosparam - Return the HSC mapping
4509 * @sdev: scsi device struct
4510 * @block_device: block device pointer
4511 * @capacity: capacity of the device
4512 * @parm: Array containing returned HSC values.
4514 * This function generates the HSC parms that fdisk uses.
4515 * We want to make sure we return something that places partitions
4516 * on 4k boundaries for best performance with the IOA.
4521 static int ipr_biosparam(struct scsi_device *sdev,
4522 struct block_device *block_device,
4523 sector_t capacity, int *parm)
/* cylinders = capacity / (128 heads * 32 sectors); sector_div is used
 * because capacity is a 64-bit sector_t. */
4531 cylinders = capacity;
4532 sector_div(cylinders, (128 * 32));
4537 parm[2] = cylinders;
4543 * ipr_find_starget - Find target based on bus/target.
4544 * @starget: scsi target struct
4547 * resource entry pointer if found / NULL if not found
/* Linear search of the used-resource queue by (bus, target); caller is
 * expected to hold the host lock (not visible in this extract — verify). */
4549 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4551 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4553 struct ipr_resource_entry *res;
4555 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4556 if ((res->bus == starget->channel) &&
4557 (res->target == starget->id)) {
/* libata port template, defined later in the file. */
4565 static struct ata_port_info sata_port_info;
4568 * ipr_target_alloc - Prepare for commands to a SCSI target
4569 * @starget: scsi target struct
4571 * If the device is a SATA device, this function allocates an
4572 * ATA port with libata, else it does nothing.
4575 * 0 on success / non-0 on failure
4577 static int ipr_target_alloc(struct scsi_target *starget)
4579 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4580 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4581 struct ipr_sata_port *sata_port;
4582 struct ata_port *ap;
4583 struct ipr_resource_entry *res;
4584 unsigned long lock_flags;
4586 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4587 res = ipr_find_starget(starget);
4588 starget->hostdata = NULL;
/* SATA targets get an ipr_sata_port and a libata SAS port; allocation
 * happens with the host lock dropped since kzalloc may sleep. */
4590 if (res && ipr_is_gata(res)) {
4591 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4592 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4596 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4598 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4599 sata_port->ioa_cfg = ioa_cfg;
4601 sata_port->res = res;
/* Cross-link resource, ata port and target so each can find the others. */
4603 res->sata_port = sata_port;
4604 ap->private_data = sata_port;
4605 starget->hostdata = sata_port;
4611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4617 * ipr_target_destroy - Destroy a SCSI target
4618 * @starget: scsi target struct
4620 * If the device was a SATA device, this function frees the libata
4621 * ATA port, else it does nothing.
4624 static void ipr_target_destroy(struct scsi_target *starget)
4626 struct ipr_sata_port *sata_port = starget->hostdata;
4627 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4628 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
/* On SIS-64, if the target no longer has a resource entry, release its id
 * from the bitmap matching its virtual bus (array / vset / physical). */
4630 if (ioa_cfg->sis64) {
4631 if (!ipr_find_starget(starget)) {
4632 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4633 clear_bit(starget->id, ioa_cfg->array_ids);
4634 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4635 clear_bit(starget->id, ioa_cfg->vset_ids);
4636 else if (starget->channel == 0)
4637 clear_bit(starget->id, ioa_cfg->target_ids);
/* Free the libata port allocated by ipr_target_alloc. */
4642 starget->hostdata = NULL;
4643 ata_sas_port_destroy(sata_port->ap);
4649 * ipr_find_sdev - Find device based on bus/target/lun.
4650 * @sdev: scsi device struct
4653 * resource entry pointer if found / NULL if not found
4655 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4657 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4658 struct ipr_resource_entry *res;
/* Same walk as ipr_find_starget() but matched on the full B/T/L triple.
 * Called under host_lock (see ipr_slave_alloc). */
4660 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4661 if ((res->bus == sdev->channel) &&
4662 (res->target == sdev->id) &&
4663 (res->lun == sdev->lun))
/* NOTE(review): the "return res" / "return NULL" lines fall outside this
 * excerpt. */
4671 * ipr_slave_destroy - Unconfigure a SCSI device
4672 * @sdev: scsi device struct
4677 static void ipr_slave_destroy(struct scsi_device *sdev)
4679 struct ipr_resource_entry *res;
4680 struct ipr_ioa_cfg *ioa_cfg;
4681 unsigned long lock_flags = 0;
4683 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4686 res = (struct ipr_resource_entry *) sdev->hostdata;
/* For a SATA resource, mark the lone ATA device on the link as gone and
 * sever the resource <-> sata_port link set up in ipr_target_alloc().
 * NOTE(review): the "if (res)" / "if (res->sata_port)" guards around
 * these statements are not visible in this excerpt. */
4689 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4690 sdev->hostdata = NULL;
4692 res->sata_port = NULL;
4694 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4698 * ipr_slave_configure - Configure a SCSI device
4699 * @sdev: scsi device struct
4701 * This function configures the specified scsi device.
4706 static int ipr_slave_configure(struct scsi_device *sdev)
4708 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4709 struct ipr_resource_entry *res;
4710 struct ata_port *ap = NULL;
4711 unsigned long lock_flags = 0;
4712 char buffer[IPR_MAX_RES_PATH_LENGTH];
4714 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4715 res = sdev->hostdata;
/* Advanced-function DASD and the IOA itself are managed by the adapter:
 * present them as RAID, pin the SCSI level, and keep upper-layer drivers
 * (sd, st, ...) from binding to them. */
4717 if (ipr_is_af_dasd_device(res))
4718 sdev->type = TYPE_RAID;
4719 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4720 sdev->scsi_level = 4;
4721 sdev->no_uld_attach = 1;
/* Volume sets get larger timeouts and transfer limits than bare disks. */
4723 if (ipr_is_vset_device(res)) {
4724 sdev->scsi_level = SCSI_SPC_3;
4725 blk_queue_rq_timeout(sdev->request_queue,
4726 IPR_VSET_RW_TIMEOUT);
4727 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4729 if (ipr_is_gata(res) && res->sata_port)
4730 ap = res->sata_port->ap;
/* Drop the lock before the libata configure call, which may sleep. */
4731 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4734 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4735 ata_sas_slave_configure(sdev, ap);
/* SIS64 only (per the surrounding code in the full source): log the
 * descriptive resource path for this device. */
4739 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4740 ipr_format_res_path(ioa_cfg,
4741 res->res_path, buffer, sizeof(buffer)));
4744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4749 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4750 * @sdev: scsi device struct
4752 * This function initializes an ATA port so that future commands
4753 * sent through queuecommand will work.
4758 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4760 struct ipr_sata_port *sata_port = NULL;
/* The sata_port was stashed on the target by ipr_target_alloc(). */
4764 if (sdev->sdev_target)
4765 sata_port = sdev->sdev_target->hostdata;
/* NOTE(review): the "if (sata_port)" guard and rc handling between these
 * calls are not visible in this excerpt. */
4767 rc = ata_sas_port_init(sata_port->ap);
4769 rc = ata_sas_sync_probe(sata_port->ap);
/* Probe failure: tear the device back down before returning the error. */
4773 ipr_slave_destroy(sdev);
4780 * ipr_slave_alloc - Prepare for commands to a device.
4781 * @sdev: scsi device struct
4783 * This function saves a pointer to the resource entry
4784 * in the scsi device struct if the device exists. We
4785 * can then use this pointer in ipr_queuecommand when
4786 * handling new commands.
4789 * 0 on success / -ENXIO if device does not exist
4791 static int ipr_slave_alloc(struct scsi_device *sdev)
4793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4794 struct ipr_resource_entry *res;
4795 unsigned long lock_flags;
4798 sdev->hostdata = NULL;
4800 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4802 res = ipr_find_sdev(sdev);
/* Resource found: hook it to the sdev so queuecommand can reach it. */
4807 sdev->hostdata = res;
/* Non-NACA devices need a SYNC COMPLETE before the next command. */
4808 if (!ipr_is_naca_model(res))
4809 res->needs_sync_complete = 1;
/* SATA devices take the libata allocation path; it may sleep, so the
 * lock is released first. */
4811 if (ipr_is_gata(res)) {
4812 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4813 return ipr_ata_slave_alloc(sdev);
4817 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4823 * ipr_match_lun - Match function for specified LUN
4824 * @ipr_cmd: ipr command struct
4825 * @device: device to match (sdev)
4828 * 1 if command matches sdev / 0 if command does not match sdev
/* Predicate handed to ipr_wait_for_ops(): true when the pending command
 * is a SCSI command bound to the given scsi_device. */
4830 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4832 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4838 * ipr_wait_for_ops - Wait for matching commands to complete
4839 * @ipr_cmd: ipr command struct
4840 * @device: device to match (sdev)
4841 * @match: match function to use
/* Returns SUCCESS when all matching ops completed, FAILED on timeout. */
4846 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4847 int (*match)(struct ipr_cmnd *, void *))
4849 struct ipr_cmnd *ipr_cmd;
4851 unsigned long flags;
4852 struct ipr_hrr_queue *hrrq;
4853 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4854 DECLARE_COMPLETION_ONSTACK(comp);
/* Pass 1: arm every matching pending command with our on-stack
 * completion so its done path wakes us. */
4860 for_each_hrrq(hrrq, ioa_cfg) {
4861 spin_lock_irqsave(hrrq->lock, flags);
4862 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4863 if (match(ipr_cmd, device)) {
4864 ipr_cmd->eh_comp = &comp;
4868 spin_unlock_irqrestore(hrrq->lock, flags);
4872 timeout = wait_for_completion_timeout(&comp, timeout);
/* Pass 2 (timeout path): disarm any commands still pending so they do
 * not later complete against a completion that left the stack. */
4877 for_each_hrrq(hrrq, ioa_cfg) {
4878 spin_lock_irqsave(hrrq->lock, flags);
4879 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4880 if (match(ipr_cmd, device)) {
4881 ipr_cmd->eh_comp = NULL;
4885 spin_unlock_irqrestore(hrrq->lock, flags);
4889 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4891 return wait ? FAILED : SUCCESS;
/* SCSI EH host-reset handler: kick off an adapter reset (unless one is
 * already in flight or the IOA is dead), wait for reset/reload to finish,
 * then report SUCCESS/FAILED based on the adapter's final state. */
4900 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4902 struct ipr_ioa_cfg *ioa_cfg;
4903 unsigned long lock_flags = 0;
4907 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4908 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4910 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4911 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4912 dev_err(&ioa_cfg->pdev->dev,
4913 "Adapter being reset as a result of error recovery.\n");
/* A dump was requested before the reset; transition to collecting it. */
4915 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4916 ioa_cfg->sdt_state = GET_DUMP;
/* Sleep (lock dropped) until the reset/reload sequence completes. */
4919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4920 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4921 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4923 /* If we got hit with a host reset while we were already resetting
4924 the adapter for some reason, and the reset failed. */
4925 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4936 * ipr_device_reset - Reset the device
4937 * @ioa_cfg: ioa config struct
4938 * @res: resource entry struct
4940 * This function issues a device reset to the affected device.
4941 * If the device is a SCSI device, a LUN reset will be sent
4942 * to the device first. If that does not work, a target reset
4943 * will be sent. If the device is a SATA device, a PHY reset will
4947 * 0 on success / non-zero on failure
4949 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4950 struct ipr_resource_entry *res)
4952 struct ipr_cmnd *ipr_cmd;
4953 struct ipr_ioarcb *ioarcb;
4954 struct ipr_cmd_pkt *cmd_pkt;
4955 struct ipr_ioarcb_ata_regs *regs;
4959 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4960 ioarcb = &ipr_cmd->ioarcb;
4961 cmd_pkt = &ioarcb->cmd_pkt;
/* The ATA register block lives in a different spot on SIS64 vs SIS32
 * IOARCBs; SIS64 also carries it as appended command parameters. */
4963 if (ipr_cmd->ioa_cfg->sis64) {
4964 regs = &ipr_cmd->i.ata_ioadl.regs;
4965 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4967 regs = &ioarcb->u.add_data.u.regs;
/* Build the IOA-internal RESET DEVICE command for this resource. */
4969 ioarcb->res_handle = res->res_handle;
4970 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4971 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
/* SATA devices get a PHY reset variant with ATA status reporting. */
4972 if (ipr_is_gata(res)) {
4973 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4974 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4975 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4978 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4979 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4980 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
/* Preserve the returned ATA taskfile for libata, unless the IOA itself
 * was reset (in which case the IOASA contents are not meaningful). */
4981 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4982 if (ipr_cmd->ioa_cfg->sis64)
4983 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4984 sizeof(struct ipr_ioasa_gata));
4986 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4987 sizeof(struct ipr_ioasa_gata));
4991 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
4995 * ipr_sata_reset - Reset the SATA port
4996 * @link: SATA link to reset
4997 * @classes: class of the attached device
4999 * This function issues a SATA phy reset to the affected ATA link.
5002 * 0 on success / non-zero on failure
5004 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5005 unsigned long deadline)
5007 struct ipr_sata_port *sata_port = link->ap->private_data;
5008 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5009 struct ipr_resource_entry *res;
5010 unsigned long lock_flags = 0;
/* If an adapter reset/reload is running, wait it out first; the wait
 * must happen with the host_lock released. */
5014 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5015 while (ioa_cfg->in_reset_reload) {
5016 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5017 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5018 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5021 res = sata_port->res;
/* Device reset (PHY reset for SATA); report the device class back to
 * libata via *classes. NOTE(review): the "if (res)" guard around these
 * lines is not visible in this excerpt. */
5023 rc = ipr_device_reset(ioa_cfg, res);
5024 *classes = res->ata_class;
5027 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5033 * ipr_eh_dev_reset - Reset the device
5034 * @scsi_cmd: scsi command struct
5036 * This function issues a device reset to the affected device.
5037 * A LUN reset will be sent to the device first. If that does
5038 * not work, a target reset will be sent.
/* Locked worker for ipr_eh_dev_reset(); caller holds the host_lock. */
5043 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5045 struct ipr_cmnd *ipr_cmd;
5046 struct ipr_ioa_cfg *ioa_cfg;
5047 struct ipr_resource_entry *res;
5048 struct ata_port *ap;
5050 struct ipr_hrr_queue *hrrq;
5053 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5054 res = scsi_cmd->device->hostdata;
5060 * If we are currently going through reset/reload, return failed. This will force the
5061 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5064 if (ioa_cfg->in_reset_reload)
5066 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* Redirect the done handlers of every op pending on this resource so
 * they complete through the EH path once the reset flushes them. */
5069 for_each_hrrq(hrrq, ioa_cfg) {
5070 spin_lock(&hrrq->_lock);
5071 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5072 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5073 if (ipr_cmd->scsi_cmd)
5074 ipr_cmd->done = ipr_scsi_eh_done;
5076 ipr_cmd->done = ipr_sata_eh_done;
/* Pending ATA qc: mark it timed out/failed so libata EH picks it up. */
5078 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5079 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5080 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5084 spin_unlock(&hrrq->_lock);
5086 res->resetting_device = 1;
5087 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
/* SATA: hand the reset to libata's standard error handler (which calls
 * back into ipr_sata_reset); it must run without the host_lock. */
5089 if (ipr_is_gata(res) && res->sata_port) {
5090 ap = res->sata_port->ap;
5091 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5092 ata_std_error_handler(ap);
5093 spin_lock_irq(scsi_cmd->device->host->host_lock);
5095 for_each_hrrq(hrrq, ioa_cfg) {
5096 spin_lock(&hrrq->_lock);
5097 list_for_each_entry(ipr_cmd,
5098 &hrrq->hrrq_pending_q, queue) {
5099 if (ipr_cmd->ioarcb.res_handle ==
5105 spin_unlock(&hrrq->_lock);
/* Plain SCSI: issue the reset directly. */
5108 rc = ipr_device_reset(ioa_cfg, res);
5109 res->resetting_device = 0;
5110 res->reset_occurred = 1;
5113 return rc ? FAILED : SUCCESS;
/* SCSI EH device-reset entry point: run the locked worker, then (on
 * SUCCESS) wait for every outstanding op on this LUN to drain. */
5116 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5119 struct ipr_ioa_cfg *ioa_cfg;
5121 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5123 spin_lock_irq(cmd->device->host->host_lock);
5124 rc = __ipr_eh_dev_reset(cmd);
5125 spin_unlock_irq(cmd->device->host->host_lock);
5128 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5134 * ipr_bus_reset_done - Op done function for bus reset.
5135 * @ipr_cmd: ipr command struct
5137 * This function is the op done function for a bus reset
5142 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5145 struct ipr_resource_entry *res;
/* On SIS32, tell the midlayer which bus was reset by resolving the
 * command's res_handle back to a resource entry. */
5148 if (!ioa_cfg->sis64)
5149 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5150 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5151 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5157 * If abort has not completed, indicate the reset has, else call the
5158 * abort's done function to wake the sleeping eh thread
5160 if (ipr_cmd->sibling->sibling)
5161 ipr_cmd->sibling->sibling = NULL;
5163 ipr_cmd->sibling->done(ipr_cmd->sibling);
5165 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5170 * ipr_abort_timeout - An abort task has timed out
5171 * @ipr_cmd: ipr command struct
5173 * This function handles when an abort task times out. If this
5174 * happens we issue a bus reset since we have resources tied
5175 * up that must be freed before returning to the midlayer.
5180 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5182 struct ipr_cmnd *reset_cmd;
5183 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5184 struct ipr_cmd_pkt *cmd_pkt;
5185 unsigned long lock_flags = 0;
5188 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Bail if the abort already completed or an adapter reset is running —
 * nothing useful to escalate in either case. */
5189 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5190 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5194 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
/* Escalate: build a bus reset and cross-link it with the stuck abort
 * via ->sibling so ipr_bus_reset_done() can wake the EH thread. */
5195 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5196 ipr_cmd->sibling = reset_cmd;
5197 reset_cmd->sibling = ipr_cmd;
5198 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5199 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5200 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5201 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5202 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5204 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5205 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5210 * ipr_cancel_op - Cancel specified op
5211 * @scsi_cmd: scsi command struct
5213 * This function cancels specified op.
/* Returns SUCCESS/FAILED per SCSI EH convention; caller holds host_lock. */
5218 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5220 struct ipr_cmnd *ipr_cmd;
5221 struct ipr_ioa_cfg *ioa_cfg;
5222 struct ipr_resource_entry *res;
5223 struct ipr_cmd_pkt *cmd_pkt;
5226 struct ipr_hrr_queue *hrrq;
5229 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5230 res = scsi_cmd->device->hostdata;
5232 /* If we are currently going through reset/reload, return failed.
5233 * This will force the mid-layer to call ipr_eh_host_reset,
5234 * which will then go to sleep and wait for the reset to complete
5236 if (ioa_cfg->in_reset_reload ||
5237 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5243 * If we are aborting a timed out op, chances are that the timeout was caused
5244 * by a still not detected EEH error. In such cases, reading a register will
5245 * trigger the EEH recovery infrastructure.
5247 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Cancel-all only applies to generic SCSI resources. */
5249 if (!ipr_is_gscsi(res))
/* Redirect the target op's done handler so its completion flows through
 * the EH path once the cancel flushes it back. */
5252 for_each_hrrq(hrrq, ioa_cfg) {
5253 spin_lock(&hrrq->_lock);
5254 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5255 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5256 ipr_cmd->done = ipr_scsi_eh_done;
5261 spin_unlock(&hrrq->_lock);
/* Issue CANCEL ALL REQUESTS for the device and wait synchronously; on
 * timeout ipr_abort_timeout() escalates to a bus reset. */
5267 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5268 ipr_cmd->ioarcb.res_handle = res->res_handle;
5269 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5270 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5271 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5272 ipr_cmd->u.sdev = scsi_cmd->device;
5274 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5276 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5277 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5280 * If the abort task timed out and we sent a bus reset, we will get
5281 * one the following responses to the abort
5283 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5288 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5289 if (!ipr_is_naca_model(res))
5290 res->needs_sync_complete = 1;
5293 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5297 * ipr_scan_finished - Determine if the device scan has completed
5298 * @shost: scsi host struct
/* NOTE(review): the original kernel-doc here named ipr_eh_abort and
 * documented @scsi_cmd, which does not match this function — corrected. */
5301 * 0 if scan in progress / 1 if scan is complete
5303 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5305 unsigned long lock_flags;
5306 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5309 spin_lock_irqsave(shost->host_lock, lock_flags);
/* Scan is over if the IOA died, the driver marked it done, or twice the
 * transition-to-operational timeout has elapsed. */
5310 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5312 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5314 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5319 * ipr_eh_abort - Abort a single op
5320 * @scsi_cmd: scsi command struct
/* NOTE(review): the original kernel-doc here named ipr_eh_host_reset,
 * which does not match this function — corrected. */
5325 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5327 unsigned long flags;
5329 struct ipr_ioa_cfg *ioa_cfg;
5333 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
/* Cancel under host_lock, then (on SUCCESS) drain remaining ops bound
 * to this LUN outside the lock. */
5335 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5336 rc = ipr_cancel_op(scsi_cmd);
5337 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5340 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5346 * ipr_handle_other_interrupt - Handle "other" interrupts
5347 * @ioa_cfg: ioa config struct
5348 * @int_reg: interrupt register
5351 * IRQ_NONE / IRQ_HANDLED
5353 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5356 irqreturn_t rc = IRQ_HANDLED;
/* Ignore bits that are currently masked. */
5359 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5360 int_reg &= ~int_mask_reg;
5362 /* If an interrupt on the adapter did not occur, ignore it.
5363 * Or in the case of SIS 64, check for a stage change interrupt.
5365 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5366 if (ioa_cfg->sis64) {
5367 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5368 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5369 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5371 /* clear stage change */
5372 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5373 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* Advance the in-flight adapter reset job on stage change. */
5374 list_del(&ioa_cfg->reset_cmd->queue);
5375 del_timer(&ioa_cfg->reset_cmd->timer);
5376 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* Adapter transitioned to operational: mask the interrupt (with a read
 * back to flush the write) and advance the reset job. */
5384 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5385 /* Mask the interrupt */
5386 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5387 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5389 list_del(&ioa_cfg->reset_cmd->queue);
5390 del_timer(&ioa_cfg->reset_cmd->timer);
5391 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* HRRQ-updated with no work found: spurious; clear and (optionally) log. */
5392 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5393 if (ioa_cfg->clear_isr) {
5394 if (ipr_debug && printk_ratelimit())
5395 dev_err(&ioa_cfg->pdev->dev,
5396 "Spurious interrupt detected. 0x%08X\n", int_reg);
5397 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5398 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Fatal conditions: record the cause, then mask everything and kick off
 * a full adapter reset. */
5402 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5403 ioa_cfg->ioa_unit_checked = 1;
5404 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5405 dev_err(&ioa_cfg->pdev->dev,
5406 "No Host RRQ. 0x%08X\n", int_reg);
5408 dev_err(&ioa_cfg->pdev->dev,
5409 "Permanent IOA failure. 0x%08X\n", int_reg);
5411 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5412 ioa_cfg->sdt_state = GET_DUMP;
5414 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5415 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5422 * ipr_isr_eh - Interrupt service routine error handler
5423 * @ioa_cfg: ioa config struct
5424 * @msg: message to log
/* @number: numeric detail appended to the log message (e.g. the bad
 * response handle). Logs the error, bumps the error count, requests a
 * dump if one is pending, and resets the adapter. */
5429 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5431 ioa_cfg->errors_logged++;
5432 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5434 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5435 ioa_cfg->sdt_state = GET_DUMP;
5437 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Drain up to @budget completed commands from one host RRQ (budget < 0
 * means unlimited), moving them onto @doneq for the caller to finish
 * outside the hrrq lock. Returns the number of commands harvested.
 * Caller holds hrr_queue->lock. */
5440 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5441 struct list_head *doneq)
5445 struct ipr_cmnd *ipr_cmd;
5446 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5449 /* If interrupts are disabled, ignore the interrupt */
5450 if (!hrr_queue->allow_interrupts)
/* The toggle bit flips each time the queue wraps; an entry is valid
 * only while its toggle bit matches the queue's current phase. */
5453 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5454 hrr_queue->toggle_bit) {
5456 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5457 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5458 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside this queue's command-id window means the adapter
 * returned garbage — escalate via ipr_isr_eh (adapter reset). */
5460 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5461 cmd_index < hrr_queue->min_cmd_id)) {
5463 "Invalid response handle from IOA: ",
5468 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5469 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5471 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5473 list_move_tail(&ipr_cmd->queue, doneq);
/* Advance the consumer pointer, wrapping (and flipping the phase bit)
 * at the end of the ring. */
5475 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5476 hrr_queue->hrrq_curr++;
5478 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5479 hrr_queue->toggle_bit ^= 1u;
5482 if (budget > 0 && num_hrrq >= budget)
/* blk-iopoll callback: harvest up to @budget completions from this
 * queue's HRRQ, re-enable interrupts if the queue ran dry before the
 * budget was exhausted, then run each completion's fast_done handler
 * outside the lock. Returns the number of ops completed. */
5489 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5491 struct ipr_ioa_cfg *ioa_cfg;
5492 struct ipr_hrr_queue *hrrq;
5493 struct ipr_cmnd *ipr_cmd, *temp;
5494 unsigned long hrrq_flags;
5498 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5499 ioa_cfg = hrrq->ioa_cfg;
5501 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5502 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
/* Under budget means the queue is empty: leave polling mode. */
5504 if (completed_ops < budget)
5505 blk_iopoll_complete(iop);
5506 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5508 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5509 list_del(&ipr_cmd->queue);
5510 del_timer(&ipr_cmd->timer);
5511 ipr_cmd->fast_done(ipr_cmd);
5514 return completed_ops;
5518 * ipr_isr - Interrupt service routine
5520 * @devp: pointer to ioa config struct
5523 * IRQ_NONE / IRQ_HANDLED
5525 static irqreturn_t ipr_isr(int irq, void *devp)
5527 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5528 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5529 unsigned long hrrq_flags = 0;
5533 struct ipr_cmnd *ipr_cmd, *temp;
5534 irqreturn_t rc = IRQ_NONE;
5537 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5538 /* If interrupts are disabled, ignore the interrupt */
5539 if (!hrrq->allow_interrupts) {
5540 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Loop harvesting completions (budget -1 = unlimited). NOTE(review):
 * the enclosing "while (1)" and several break/ack lines are not visible
 * in this excerpt. */
5545 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5548 if (!ioa_cfg->clear_isr)
5551 /* Clear the PCI interrupt */
/* Some adapters need the HRRQ interrupt explicitly cleared; retry a
 * bounded number of times since the write may not stick immediately. */
5554 writel(IPR_PCII_HRRQ_UPDATED,
5555 ioa_cfg->regs.clr_interrupt_reg32);
5556 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5557 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5558 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5560 } else if (rc == IRQ_NONE && irq_none == 0) {
5561 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
/* Could not clear HRRQ-updated after max retries: adapter is wedged. */
5563 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5564 int_reg & IPR_PCII_HRRQ_UPDATED) {
5566 "Error clearing HRRQ: ", num_hrrq);
/* No completion work found: see if this is an "other" (error/stage)
 * interrupt instead. */
5573 if (unlikely(rc == IRQ_NONE))
5574 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5576 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Finish harvested commands outside the hrrq lock. */
5577 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5578 list_del(&ipr_cmd->queue);
5579 del_timer(&ipr_cmd->timer);
5580 ipr_cmd->fast_done(ipr_cmd);
5586 * ipr_isr_mhrrq - Interrupt service routine
5588 * @devp: pointer to ioa config struct
5591 * IRQ_NONE / IRQ_HANDLED
/* ISR for the secondary (non-init) HRRQs when multiple MSI-X vectors
 * are in use; each vector maps to one ipr_hrr_queue. */
5593 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5595 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5596 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5597 unsigned long hrrq_flags = 0;
5598 struct ipr_cmnd *ipr_cmd, *temp;
5599 irqreturn_t rc = IRQ_NONE;
5602 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5604 /* If interrupts are disabled, ignore the interrupt */
5605 if (!hrrq->allow_interrupts) {
5606 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* When iopoll is enabled (SIS64, multiple vectors), defer the harvest
 * to the blk-iopoll softirq instead of doing it in hard-IRQ context. */
5610 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5611 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5613 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5614 blk_iopoll_sched(&hrrq->iopoll);
5615 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Otherwise harvest inline, same as ipr_isr. */
5619 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5622 if (ipr_process_hrrq(hrrq, -1, &doneq))
5626 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5628 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5629 list_del(&ipr_cmd->queue);
5630 del_timer(&ipr_cmd->timer);
5631 ipr_cmd->fast_done(ipr_cmd);
5637 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5638 * @ioa_cfg: ioa config struct
5639 * @ipr_cmd: ipr command struct
5642 * 0 on success / -1 on failure
/* SIS64 variant: 64-bit IOADL descriptors, single combined length/len
 * field in the IOARCB regardless of direction. */
5644 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5645 struct ipr_cmnd *ipr_cmd)
5648 struct scatterlist *sg;
5650 u32 ioadl_flags = 0;
5651 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5652 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5653 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5655 length = scsi_bufflen(scsi_cmd);
/* DMA-map the scatterlist; on failure log (rate-limited) and bail. */
5659 nseg = scsi_dma_map(scsi_cmd);
5661 if (printk_ratelimit())
5662 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5666 ipr_cmd->dma_use_sg = nseg;
5668 ioarcb->data_transfer_length = cpu_to_be32(length);
5670 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5672 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5673 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5674 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5675 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5676 ioadl_flags = IPR_IOADL_FLAGS_READ;
/* One 64-bit descriptor per mapped segment; the adapter needs the LAST
 * flag set on the final descriptor. */
5678 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5679 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5680 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5681 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5684 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5689 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5690 * @ioa_cfg: ioa config struct
5691 * @ipr_cmd: ipr command struct
5694 * 0 on success / -1 on failure
/* SIS32 variant: 32-bit IOADL descriptors; read and write directions
 * use separate length/len fields in the IOARCB, and short lists can be
 * embedded directly in the IOARCB's add_data area. */
5696 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5697 struct ipr_cmnd *ipr_cmd)
5700 struct scatterlist *sg;
5702 u32 ioadl_flags = 0;
5703 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5704 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5705 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5707 length = scsi_bufflen(scsi_cmd);
5711 nseg = scsi_dma_map(scsi_cmd);
5713 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5717 ipr_cmd->dma_use_sg = nseg;
5719 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5720 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5721 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5722 ioarcb->data_transfer_length = cpu_to_be32(length);
5724 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5725 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5726 ioadl_flags = IPR_IOADL_FLAGS_READ;
5727 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5728 ioarcb->read_ioadl_len =
5729 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* Small lists fit inline in the IOARCB, saving the adapter a fetch. */
5732 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5733 ioadl = ioarcb->u.add_data.u.ioadl;
5734 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5735 offsetof(struct ipr_ioarcb, u.add_data));
5736 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5739 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5740 ioadl[i].flags_and_data_len =
5741 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5742 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
/* Adapter requires the LAST flag on the final descriptor. */
5745 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5750 * ipr_erp_done - Process completion of ERP for a device
5751 * @ipr_cmd: ipr command struct
5753 * This function copies the sense buffer into the scsi_cmd
5754 * struct and pushes the scsi_done function.
5759 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5761 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5762 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5763 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* REQUEST SENSE itself failed: surface DID_ERROR instead of copying a
 * bogus sense buffer back to the midlayer. */
5765 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5766 scsi_cmd->result |= (DID_ERROR << 16);
5767 scmd_printk(KERN_ERR, scsi_cmd,
5768 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5770 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5771 SCSI_SENSE_BUFFERSIZE);
/* NOTE(review): the "if (res)" guard around the lines below is not
 * visible in this excerpt. */
5775 if (!ipr_is_naca_model(res))
5776 res->needs_sync_complete = 1;
/* Unmap, recycle the command block, and complete back to the midlayer. */
5779 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5780 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5781 scsi_cmd->scsi_done(scsi_cmd);
5785 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5786 * @ipr_cmd: ipr command struct
/* Clears the command packet, transfer lengths and status so the same
 * ipr_cmnd can be reissued for an ERP step (cancel-all/request-sense),
 * then re-points the IOADL address at the command's own DMA area. */
5791 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5793 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5794 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5795 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5797 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5798 ioarcb->data_transfer_length = 0;
5799 ioarcb->read_data_transfer_length = 0;
5800 ioarcb->ioadl_len = 0;
5801 ioarcb->read_ioadl_len = 0;
5802 ioasa->hdr.ioasc = 0;
5803 ioasa->hdr.residual_data_len = 0;
/* IOADL pointer format differs: one 64-bit address on SIS64, paired
 * 32-bit read/write addresses on SIS32. */
5805 if (ipr_cmd->ioa_cfg->sis64)
5806 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5807 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5809 ioarcb->write_ioadl_addr =
5810 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5811 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5816 * ipr_erp_request_sense - Send request sense to a device
5817 * @ipr_cmd: ipr command struct
5819 * This function sends a request sense to a device as a result
5820 * of a check condition.
5825 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5827 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5828 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Prior ERP step (cancel all) failed: give up and complete the command
 * through ipr_erp_done rather than chaining further. */
5830 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5831 ipr_erp_done(ipr_cmd);
5835 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build a plain REQUEST SENSE CDB targeting the command's own
 * sense_buffer via a single read IOADL entry. */
5837 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5838 cmd_pkt->cdb[0] = REQUEST_SENSE;
5839 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5840 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5841 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5842 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5844 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5845 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5847 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5848 IPR_REQUEST_SENSE_TIMEOUT * 2);
5852 * ipr_erp_cancel_all - Send cancel all to a device
5853 * @ipr_cmd: ipr command struct
5855 * This function sends a cancel all to a device to clear the
5856 * queue. If we are running TCQ on the device, QERR is set to 1,
5857 * which means all outstanding ops have been dropped on the floor.
5858 * Cancel all will return them to us.
5863 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5865 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5866 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5867 struct ipr_cmd_pkt *cmd_pkt;
5871 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* No tagged queuing on this device: nothing queued to cancel, skip
 * straight to the request-sense step of ERP. */
5873 if (!scsi_cmd->device->simple_tags) {
5874 ipr_erp_request_sense(ipr_cmd);
5878 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5879 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5880 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
/* Chain: when the cancel completes, continue ERP with request sense. */
5882 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5883 IPR_CANCEL_ALL_TIMEOUT);
5887 * ipr_dump_ioasa - Dump contents of IOASA
5888 * @ioa_cfg: ioa config struct
5889 * @ipr_cmd: ipr command struct
5890 * @res: resource entry struct
5892 * This function is invoked by the interrupt handler when ops
5893 * fail. It will log the IOASA if appropriate. Only called
5899 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5900 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5904 u32 ioasc, fd_ioasc;
5905 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5906 __be32 *ioasa_data = (__be32 *)ioasa;
5909 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5910 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
/* Logging is gated by the configured log level. */
5915 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* For bus-reset IOASCs prefer the failing-device IOASC, which carries
 * the underlying cause. */
5918 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5919 error_index = ipr_get_error(fd_ioasc);
5921 error_index = ipr_get_error(ioasc);
/* Below max log level, suppress duplicates and non-GSCSI noise. */
5923 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5924 /* Don't log an error if the IOA already logged one */
5925 if (ioasa->hdr.ilid != 0)
5928 if (!ipr_is_gscsi(res))
5931 if (ipr_error_table[error_index].log_ioasa == 0)
5935 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Clamp the dump length to the size of the IOASA for this SIS level. */
5937 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5938 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5939 data_len = sizeof(struct ipr_ioasa64);
5940 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5941 data_len = sizeof(struct ipr_ioasa);
5943 ipr_err("IOASA Dump:\n");
/* Hex-dump the IOASA four 32-bit words per line. */
5945 for (i = 0; i < data_len / 4; i += 4) {
5946 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5947 be32_to_cpu(ioasa_data[i]),
5948 be32_to_cpu(ioasa_data[i+1]),
5949 be32_to_cpu(ioasa_data[i+2]),
5950 be32_to_cpu(ioasa_data[i+3]));
5955 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5957 * @sense_buf: sense data buffer
5962 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5965 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5966 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5967 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5968 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5970 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs have no device sense to translate. */
5972 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5975 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* A vset device with a non-zero high LBA word needs descriptor
 * format sense (0x72) so the full 64-bit failing LBA fits. */
5977 if (ipr_is_vset_device(res) &&
5978 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5979 ioasa->u.vset.failing_lba_hi != 0) {
5980 sense_buf[0] = 0x72;
5981 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5982 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5983 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5987 sense_buf[9] = 0x0A;
5988 sense_buf[10] = 0x80;
/* High 32 bits of the failing LBA, big-endian into bytes 12-15. */
5990 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5992 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5993 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5994 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5995 sense_buf[15] = failing_lba & 0x000000ff;
/* Low 32 bits of the failing LBA, big-endian into bytes 16-19. */
5997 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5999 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6000 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6001 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6002 sense_buf[19] = failing_lba & 0x000000ff;
/* Otherwise build classic fixed-format (0x70) sense data. */
6004 sense_buf[0] = 0x70;
6005 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6006 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6007 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6009 /* Illegal request */
6010 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6011 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID) {
6012 sense_buf[7] = 10; /* additional length */
6014 /* IOARCB was in error */
6015 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6016 sense_buf[15] = 0xC0;
6017 else /* Parameter data was invalid */
6018 sense_buf[15] = 0x80;
/* Field pointer (which CDB/parameter byte was bad) into
 * the sense-key-specific bytes. */
6021 ((IPR_FIELD_POINTER_MASK &
6022 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6024 (IPR_FIELD_POINTER_MASK &
6025 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6027 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6028 if (ipr_is_vset_device(res))
6029 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6031 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6033 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6034 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6035 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6036 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6037 sense_buf[6] = failing_lba & 0x000000ff;
6040 sense_buf[7] = 6; /* additional length */
6046 * ipr_get_autosense - Copy autosense data to sense buffer
6047 * @ipr_cmd: ipr command struct
6049 * This function copies the autosense buffer to the buffer
6050 * in the scsi_cmd, if there is autosense available.
6053 * 1 if autosense was available / 0 if not
6055 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6057 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6058 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
/* Adapter did not capture autosense for this op. */
6060 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
/* Copy from whichever IOASA layout this adapter uses, truncating
 * to the midlayer's sense buffer size. */
6063 if (ipr_cmd->ioa_cfg->sis64)
6064 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6065 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6066 SCSI_SENSE_BUFFERSIZE));
6068 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6069 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6070 SCSI_SENSE_BUFFERSIZE));
6075 * ipr_erp_start - Process an error response for a SCSI op
6076 * @ioa_cfg: ioa config struct
6077 * @ipr_cmd: ipr command struct
6079 * This function determines whether or not to initiate ERP
6080 * on the affected device.
6085 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6086 struct ipr_cmnd *ipr_cmd)
6088 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6089 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6090 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6091 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6094 ipr_scsi_eh_done(ipr_cmd);
/* Non-GSCSI devices (except device-bus-status errors) need sense
 * data synthesized from the IOASA. */
6098 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6099 ipr_gen_sense(ipr_cmd);
6101 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
/* Map the masked IOASC onto a midlayer host-byte result and decide
 * whether the device needs a sync-complete on its next command. */
6103 switch (masked_ioasc) {
6104 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6105 if (ipr_is_naca_model(res))
6106 scsi_cmd->result |= (DID_ABORT << 16);
6108 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6110 case IPR_IOASC_IR_RESOURCE_HANDLE:
6111 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6112 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6114 case IPR_IOASC_HW_SEL_TIMEOUT:
6115 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6116 if (!ipr_is_naca_model(res))
6117 res->needs_sync_complete = 1;
6119 case IPR_IOASC_SYNC_REQUIRED:
6121 res->needs_sync_complete = 1;
6122 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6124 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6125 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6126 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6128 case IPR_IOASC_BUS_WAS_RESET:
6129 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6131 * Report the bus reset and ask for a retry. The device
6132 * will give CC/UA the next command.
6134 if (!res->resetting_device)
6135 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6136 scsi_cmd->result |= (DID_ERROR << 16);
6137 if (!ipr_is_naca_model(res))
6138 res->needs_sync_complete = 1;
6140 case IPR_IOASC_HW_DEV_BUS_STATUS:
6141 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
/* Check condition without autosense: kick off full ERP
 * (cancel all, then request sense) for non-NACA devices. */
6142 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6143 if (!ipr_get_autosense(ipr_cmd)) {
6144 if (!ipr_is_naca_model(res)) {
6145 ipr_erp_cancel_all(ipr_cmd);
6150 if (!ipr_is_naca_model(res))
6151 res->needs_sync_complete = 1;
6153 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6156 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6157 scsi_cmd->result |= (DID_ERROR << 16);
6158 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6159 res->needs_sync_complete = 1;
/* Release DMA mappings, return the command block to the free
 * queue, and complete the command back to the midlayer. */
6163 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6164 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6165 scsi_cmd->scsi_done(scsi_cmd);
6169 * ipr_scsi_done - mid-layer done function
6170 * @ipr_cmd: ipr command struct
6172 * This function is invoked by the interrupt handler for
6173 * ops generated by the SCSI mid-layer
6178 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6180 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6181 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6182 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6183 unsigned long hrrq_flags;
6185 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
/* Success path: no sense key, complete straight back to the
 * midlayer under the hrrq lock. */
6187 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6188 scsi_dma_unmap(scsi_cmd);
6190 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6191 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6192 scsi_cmd->scsi_done(scsi_cmd);
6193 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
/* Error path: enter error-recovery processing under the lock. */
6195 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6196 ipr_erp_start(ioa_cfg, ipr_cmd);
6197 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6202 * ipr_queuecommand - Queue a mid-layer request
6203 * @shost: scsi host struct
6204 * @scsi_cmd: scsi command struct
6206 * This function queues a request generated by the mid-layer.
6210 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6211 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6213 static int ipr_queuecommand(struct Scsi_Host *shost,
6214 struct scsi_cmnd *scsi_cmd)
6216 struct ipr_ioa_cfg *ioa_cfg;
6217 struct ipr_resource_entry *res;
6218 struct ipr_ioarcb *ioarcb;
6219 struct ipr_cmnd *ipr_cmd;
6220 unsigned long hrrq_flags, lock_flags;
6222 struct ipr_hrr_queue *hrrq;
6225 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6227 scsi_cmd->result = (DID_OK << 16);
6228 res = scsi_cmd->device->hostdata;
/* SATA devices are routed through libata rather than the normal
 * SCSI path; queue via the SAS transport under the host lock. */
6230 if (ipr_is_gata(res) && res->sata_port) {
6231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6232 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pick a host response queue (round-robin style index). */
6237 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6238 hrrq = &ioa_cfg->hrrq[hrrq_id];
6240 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6242 * We are currently blocking all devices due to a host reset
6243 * We have told the host to stop giving us new requests, but
6244 * ERP ops don't count. FIXME
6246 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6247 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6248 return SCSI_MLQUEUE_HOST_BUSY;
6252 * FIXME - Create scsi_set_host_offline interface
6253 * and the ioa_is_dead check can be removed
6255 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6256 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Grab a free command block; none available means host busy. */
6260 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6261 if (ipr_cmd == NULL) {
6262 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6263 return SCSI_MLQUEUE_HOST_BUSY;
6265 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6267 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6268 ioarcb = &ipr_cmd->ioarcb;
6270 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6271 ipr_cmd->scsi_cmd = scsi_cmd;
6272 ipr_cmd->done = ipr_scsi_eh_done;
/* Per-device request flags: underlength checking, task attributes,
 * and an extra delay if the device was just reset. */
6274 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6275 if (scsi_cmd->underflow == 0)
6276 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6278 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6279 if (ipr_is_gscsi(res) && res->reset_occurred) {
6280 res->reset_occurred = 0;
6281 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6283 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6284 if (scsi_cmd->flags & SCMD_TAGGED)
6285 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6287 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
/* Vendor-unique opcodes (0xC0 and up) go to the IOA itself. */
6290 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6291 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6292 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* Build the scatter/gather list for whichever SIS generation
 * the adapter speaks. */
6296 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6298 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
/* Re-check adapter state under the lock before actually sending;
 * back out the command block if sending is no longer allowed. */
6300 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6301 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6302 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6303 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6305 scsi_dma_unmap(scsi_cmd);
6306 return SCSI_MLQUEUE_HOST_BUSY;
6309 if (unlikely(hrrq->ioa_is_dead)) {
6310 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6311 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6312 scsi_dma_unmap(scsi_cmd);
6316 ioarcb->res_handle = res->res_handle;
6317 if (res->needs_sync_complete) {
6318 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6319 res->needs_sync_complete = 0;
6321 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6322 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6323 ipr_send_command(ipr_cmd);
6324 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
/* Dead-adapter / missing-resource path: fail the command with
 * DID_NO_CONNECT so the midlayer offlines the device. */
6328 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6329 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6330 scsi_cmd->result = (DID_NO_CONNECT << 16);
6331 scsi_cmd->scsi_done(scsi_cmd);
6332 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6337 * ipr_ioctl - IOCTL handler
6338 * @sdev: scsi device struct
6343 * 0 on success / other on failure
6345 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6347 struct ipr_resource_entry *res;
6349 res = (struct ipr_resource_entry *)sdev->hostdata;
/* SATA devices get their ioctls forwarded to libata (except
 * HDIO_GET_IDENTITY, which is handled separately). */
6350 if (res && ipr_is_gata(res)) {
6351 if (cmd == HDIO_GET_IDENTITY)
6353 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6360 * ipr_info - Get information about the card/driver
6361 * @scsi_host: scsi host struct
6364 * pointer to buffer with description string
6366 static const char *ipr_ioa_info(struct Scsi_Host *host)
6368 static char buffer[512];
6369 struct ipr_ioa_cfg *ioa_cfg;
6370 unsigned long lock_flags = 0;
6372 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
/* Format into a static buffer under the host lock; the midlayer
 * only expects a short-lived description string. */
6374 spin_lock_irqsave(host->host_lock, lock_flags);
6375 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6376 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI midlayer host template: wires the ipr entry points (queueing,
 * error handling, device lifecycle, sysfs attributes) into the core. */
6381 static struct scsi_host_template driver_template = {
6382 .module = THIS_MODULE,
6384 .info = ipr_ioa_info,
6386 .queuecommand = ipr_queuecommand,
6387 .eh_abort_handler = ipr_eh_abort,
6388 .eh_device_reset_handler = ipr_eh_dev_reset,
6389 .eh_host_reset_handler = ipr_eh_host_reset,
6390 .slave_alloc = ipr_slave_alloc,
6391 .slave_configure = ipr_slave_configure,
6392 .slave_destroy = ipr_slave_destroy,
6393 .scan_finished = ipr_scan_finished,
6394 .target_alloc = ipr_target_alloc,
6395 .target_destroy = ipr_target_destroy,
6396 .change_queue_depth = ipr_change_queue_depth,
6397 .bios_param = ipr_biosparam,
6398 .can_queue = IPR_MAX_COMMANDS,
6400 .sg_tablesize = IPR_MAX_SGLIST,
6401 .max_sectors = IPR_IOA_MAX_SECTORS,
6402 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6403 .use_clustering = ENABLE_CLUSTERING,
6404 .shost_attrs = ipr_ioa_attrs,
6405 .sdev_attrs = ipr_dev_attrs,
6406 .proc_name = IPR_NAME,
6412 * ipr_ata_phy_reset - libata phy_reset handler
6413 * @ap: ata port to reset
6416 static void ipr_ata_phy_reset(struct ata_port *ap)
6418 unsigned long flags;
6419 struct ipr_sata_port *sata_port = ap->private_data;
6420 struct ipr_resource_entry *res = sata_port->res;
6421 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* Wait out any in-progress adapter reset, dropping the host lock
 * while sleeping and re-checking afterwards. */
6425 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6426 while (ioa_cfg->in_reset_reload) {
6427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6428 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6429 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6432 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6435 rc = ipr_device_reset(ioa_cfg, res);
6438 ap->link.device[0].class = ATA_DEV_NONE;
/* Reset succeeded: take the device class reported by the adapter,
 * treating "unknown" as no device present. */
6442 ap->link.device[0].class = res->ata_class;
6443 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6444 ap->link.device[0].class = ATA_DEV_NONE;
6447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6452 * ipr_ata_post_internal - Cleanup after an internal command
6453 * @qc: ATA queued command
6458 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6460 struct ipr_sata_port *sata_port = qc->ap->private_data;
6461 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6462 struct ipr_cmnd *ipr_cmd;
6463 struct ipr_hrr_queue *hrrq;
6464 unsigned long flags;
/* Let any adapter reset finish before touching the queues. */
6466 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6467 while (ioa_cfg->in_reset_reload) {
6468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6469 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6470 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* If the internal qc is still pending on any hrrq, reset the
 * device to flush it out. */
6473 for_each_hrrq(hrrq, ioa_cfg) {
6474 spin_lock(&hrrq->_lock);
6475 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6476 if (ipr_cmd->qc == qc) {
6477 ipr_device_reset(ioa_cfg, sata_port->res);
6481 spin_unlock(&hrrq->_lock);
6483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6487 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6488 * @regs: destination
6489 * @tf: source ATA taskfile
6494 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6495 struct ata_taskfile *tf)
/* Straight field-for-field copy of the taskfile registers,
 * including the high-order (hob) bytes for 48-bit LBA commands. */
6497 regs->feature = tf->feature;
6498 regs->nsect = tf->nsect;
6499 regs->lbal = tf->lbal;
6500 regs->lbam = tf->lbam;
6501 regs->lbah = tf->lbah;
6502 regs->device = tf->device;
6503 regs->command = tf->command;
6504 regs->hob_feature = tf->hob_feature;
6505 regs->hob_nsect = tf->hob_nsect;
6506 regs->hob_lbal = tf->hob_lbal;
6507 regs->hob_lbam = tf->hob_lbam;
6508 regs->hob_lbah = tf->hob_lbah;
6509 regs->ctl = tf->ctl;
6513 * ipr_sata_done - done function for SATA commands
6514 * @ipr_cmd: ipr command struct
6516 * This function is invoked by the interrupt handler for
6517 * ops generated by the SCSI mid-layer to SATA devices
6522 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6525 struct ata_queued_cmd *qc = ipr_cmd->qc;
6526 struct ipr_sata_port *sata_port = qc->ap->private_data;
6527 struct ipr_resource_entry *res = sata_port->res;
6528 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Save the GATA status portion of the IOASA (layout depends on the
 * SIS generation) so ipr_qc_fill_rtf() can read it later. */
6530 spin_lock(&ipr_cmd->hrrq->_lock);
6531 if (ipr_cmd->ioa_cfg->sis64)
6532 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6533 sizeof(struct ipr_ioasa_gata));
6535 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6536 sizeof(struct ipr_ioasa_gata));
6537 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6539 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6540 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
/* Translate the ATA status byte into a libata error mask; the
 * stricter __ac_err_mask is used when the sense key signals error. */
6542 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6543 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6545 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6546 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6547 spin_unlock(&ipr_cmd->hrrq->_lock);
6548 ata_qc_complete(qc);
6552 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6553 * @ipr_cmd: ipr command struct
6554 * @qc: ATA queued command
6557 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6558 struct ata_queued_cmd *qc)
6560 u32 ioadl_flags = 0;
6561 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6562 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6563 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6564 int len = qc->nbytes;
6565 struct scatterlist *sg;
6567 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Pick read/write descriptor flags from the DMA direction. */
6572 if (qc->dma_dir == DMA_TO_DEVICE) {
6573 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6575 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6576 ioadl_flags = IPR_IOADL_FLAGS_READ;
6578 ioarcb->data_transfer_length = cpu_to_be32(len);
6580 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6581 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
/* One 64-bit descriptor per scatter/gather element. */
6584 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6585 ioadl64->flags = cpu_to_be32(ioadl_flags);
6586 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6587 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6589 last_ioadl64 = ioadl64;
/* Mark the final descriptor so the adapter knows where the list ends. */
6593 if (likely(last_ioadl64))
6594 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6598 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6599 * @ipr_cmd: ipr command struct
6600 * @qc: ATA queued command
6603 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6604 struct ata_queued_cmd *qc)
6606 u32 ioadl_flags = 0;
6607 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6608 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6609 struct ipr_ioadl_desc *last_ioadl = NULL;
6610 int len = qc->nbytes;
6611 struct scatterlist *sg;
/* Legacy (32-bit SIS) layout keeps separate read/write length and
 * descriptor-list fields in the IOARCB. */
6617 if (qc->dma_dir == DMA_TO_DEVICE) {
6618 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6619 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6620 ioarcb->data_transfer_length = cpu_to_be32(len);
6622 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6623 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6624 ioadl_flags = IPR_IOADL_FLAGS_READ;
6625 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6626 ioarcb->read_ioadl_len =
6627 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* One descriptor per scatter/gather element; length and flags are
 * packed into a single 32-bit field. */
6630 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6631 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6632 ioadl->address = cpu_to_be32(sg_dma_address(sg));
/* Mark the final descriptor as the end of the list. */
6638 if (likely(last_ioadl))
6639 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6643 * ipr_qc_defer - Get a free ipr_cmd
6644 * @qc: queued command
6649 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6651 struct ata_port *ap = qc->ap;
6652 struct ipr_sata_port *sata_port = ap->private_data;
6653 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6654 struct ipr_cmnd *ipr_cmd;
6655 struct ipr_hrr_queue *hrrq;
6658 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6659 hrrq = &ioa_cfg->hrrq[hrrq_id];
/* Reserve a command block now (stashed in qc->lldd_task) so that
 * ipr_qc_issue() cannot fail to find one later. */
6661 qc->lldd_task = NULL;
6662 spin_lock(&hrrq->_lock);
6663 if (unlikely(hrrq->ioa_is_dead)) {
6664 spin_unlock(&hrrq->_lock);
/* Adapter is temporarily not accepting commands: ask libata to retry. */
6668 if (unlikely(!hrrq->allow_cmds)) {
6669 spin_unlock(&hrrq->_lock);
6670 return ATA_DEFER_LINK;
6673 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6674 if (ipr_cmd == NULL) {
6675 spin_unlock(&hrrq->_lock);
6676 return ATA_DEFER_LINK;
6679 qc->lldd_task = ipr_cmd;
6680 spin_unlock(&hrrq->_lock);
6685 * ipr_qc_issue - Issue a SATA qc to a device
6686 * @qc: queued command
6691 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6693 struct ata_port *ap = qc->ap;
6694 struct ipr_sata_port *sata_port = ap->private_data;
6695 struct ipr_resource_entry *res = sata_port->res;
6696 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6697 struct ipr_cmnd *ipr_cmd;
6698 struct ipr_ioarcb *ioarcb;
6699 struct ipr_ioarcb_ata_regs *regs;
/* The command block was reserved by ipr_qc_defer(); its absence
 * here is an error. */
6701 if (qc->lldd_task == NULL)
6704 ipr_cmd = qc->lldd_task;
6705 if (ipr_cmd == NULL)
6706 return AC_ERR_SYSTEM;
6708 qc->lldd_task = NULL;
6709 spin_lock(&ipr_cmd->hrrq->_lock);
/* Adapter state may have changed since defer; give the block back
 * and fail the qc if commands are no longer allowed. */
6710 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6711 ipr_cmd->hrrq->ioa_is_dead)) {
6712 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6713 spin_unlock(&ipr_cmd->hrrq->_lock);
6714 return AC_ERR_SYSTEM;
6717 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6718 ioarcb = &ipr_cmd->ioarcb;
/* Location of the ATA register block differs between SIS
 * generations: appended after the IOARCB on sis64, embedded on sis32. */
6720 if (ioa_cfg->sis64) {
6721 regs = &ipr_cmd->i.ata_ioadl.regs;
6722 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6724 regs = &ioarcb->u.add_data.u.regs;
6726 memset(regs, 0, sizeof(*regs));
6727 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6729 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6731 ipr_cmd->done = ipr_sata_done;
6732 ipr_cmd->ioarcb.res_handle = res->res_handle;
6733 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6734 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6735 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6736 ipr_cmd->dma_use_sg = qc->n_elem;
6739 ipr_build_ata_ioadl64(ipr_cmd, qc);
6741 ipr_build_ata_ioadl(ipr_cmd, qc);
6743 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6744 ipr_copy_sata_tf(regs, &qc->tf);
6745 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6746 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Translate the libata protocol into IOA transfer-type flags;
 * unrecognized protocols are rejected as invalid. */
6748 switch (qc->tf.protocol) {
6749 case ATA_PROT_NODATA:
6754 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6757 case ATAPI_PROT_PIO:
6758 case ATAPI_PROT_NODATA:
6759 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6762 case ATAPI_PROT_DMA:
6763 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6764 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6769 spin_unlock(&ipr_cmd->hrrq->_lock);
6770 return AC_ERR_INVALID;
6773 ipr_send_command(ipr_cmd);
6774 spin_unlock(&ipr_cmd->hrrq->_lock);
6780 * ipr_qc_fill_rtf - Read result TF
6781 * @qc: ATA queued command
6786 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6788 struct ipr_sata_port *sata_port = qc->ap->private_data;
6789 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6790 struct ata_taskfile *tf = &qc->result_tf;
/* Copy the GATA result registers (saved by ipr_sata_done()) back
 * into the libata result taskfile. */
6792 tf->feature = g->error;
6793 tf->nsect = g->nsect;
6797 tf->device = g->device;
6798 tf->command = g->status;
6799 tf->hob_nsect = g->hob_nsect;
6800 tf->hob_lbal = g->hob_lbal;
6801 tf->hob_lbam = g->hob_lbam;
6802 tf->hob_lbah = g->hob_lbah;
/* libata port operations: all SATA traffic is funneled through the
 * IOA as ATA passthrough commands (see ipr_qc_issue). */
6807 static struct ata_port_operations ipr_sata_ops = {
6808 .phy_reset = ipr_ata_phy_reset,
6809 .hardreset = ipr_sata_reset,
6810 .post_internal_cmd = ipr_ata_post_internal,
6811 .qc_prep = ata_noop_qc_prep,
6812 .qc_defer = ipr_qc_defer,
6813 .qc_issue = ipr_qc_issue,
6814 .qc_fill_rtf = ipr_qc_fill_rtf,
6815 .port_start = ata_sas_port_start,
6816 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata for attached SATA devices. */
6819 static struct ata_port_info sata_port_info = {
6820 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6821 .pio_mask = ATA_PIO4_ONLY,
6822 .mwdma_mask = ATA_MWDMA2,
6823 .udma_mask = ATA_UDMA6,
6824 .port_ops = &ipr_sata_ops
6827 #ifdef CONFIG_PPC_PSERIES
/* PVR values of pSeries processors on which early Gemstone
 * adapter revisions are blocked. */
6828 static const u16 ipr_blocked_processors[] = {
6840 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6841 * @ioa_cfg: ioa cfg struct
6843 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6844 * certain pSeries hardware. This function determines if the given
6845 * adapter is in one of these configurations or not.
6848 * 1 if adapter is not supported / 0 if adapter is supported
6850 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6854 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6855 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6856 if (pvr_version_is(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter is considered supported. */
6863 #define ipr_invalid_adapter(ioa_cfg) 0
6867 * ipr_ioa_bringdown_done - IOA bring down completion.
6868 * @ipr_cmd: ipr command struct
6870 * This function processes the completion of an adapter bring down.
6871 * It wakes any reset sleepers.
6876 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6878 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Unless the adapter is being removed, let the midlayer resume
 * issuing requests (host lock dropped around the unblock call). */
6882 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6884 spin_unlock_irq(ioa_cfg->host->host_lock);
6885 scsi_unblock_requests(ioa_cfg->host);
6886 spin_lock_irq(ioa_cfg->host->host_lock);
6889 ioa_cfg->in_reset_reload = 0;
6890 ioa_cfg->reset_retries = 0;
/* Mark every hrrq dead so no further commands are accepted. */
6891 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6892 spin_lock(&ioa_cfg->hrrq[i]._lock);
6893 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6894 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6898 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6899 wake_up_all(&ioa_cfg->reset_wait_q);
6902 return IPR_RC_JOB_RETURN;
6906 * ipr_ioa_reset_done - IOA reset completion.
6907 * @ipr_cmd: ipr command struct
6909 * This function processes the completion of an adapter reset.
6910 * It schedules any necessary mid-layer add/removes and
6911 * wakes any reset sleepers.
6916 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6918 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6919 struct ipr_resource_entry *res;
6920 struct ipr_hostrcb *hostrcb, *temp;
/* Reset finished: re-enable command delivery on every hrrq. */
6924 ioa_cfg->in_reset_reload = 0;
6925 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6926 spin_lock(&ioa_cfg->hrrq[j]._lock);
6927 ioa_cfg->hrrq[j].allow_cmds = 1;
6928 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6931 ioa_cfg->reset_cmd = NULL;
6932 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Schedule midlayer add/remove work for any resource that changed
 * during the reset. */
6934 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6935 if (res->add_to_ml || res->del_from_ml) {
6940 schedule_work(&ioa_cfg->work_q);
/* Re-arm the HCAMs: the first few free hostrcbs go out as log-data
 * requests, the rest as config-change requests. */
6942 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6943 list_del(&hostrcb->queue);
6944 if (i++ < IPR_NUM_LOG_HCAMS)
6945 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6947 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6950 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6951 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6953 ioa_cfg->reset_retries = 0;
6954 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6955 wake_up_all(&ioa_cfg->reset_wait_q);
/* Unblock midlayer requests (host lock dropped around the call),
 * then re-block if commands are still not allowed. */
6957 spin_unlock(ioa_cfg->host->host_lock);
6958 scsi_unblock_requests(ioa_cfg->host);
6959 spin_lock(ioa_cfg->host->host_lock);
6961 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6962 scsi_block_requests(ioa_cfg->host);
6964 schedule_work(&ioa_cfg->work_q);
6966 return IPR_RC_JOB_RETURN;
6970 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6971 * @supported_dev: supported device struct
6972 * @vpids: vendor product id struct
6977 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6978 struct ipr_std_inq_vpids *vpids)
/* Zero the buffer, then fill in a single-record entry carrying the
 * device's vendor/product IDs. */
6980 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6981 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6982 supported_dev->num_records = 1;
6983 supported_dev->data_length =
6984 cpu_to_be16(sizeof(struct ipr_supported_device));
6985 supported_dev->reserved = 0;
6989 * ipr_set_supported_devs - Send Set Supported Devices for a device
6990 * @ipr_cmd: ipr command struct
6992 * This function sends a Set Supported Devices to the adapter
6995 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6997 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6999 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7000 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7001 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7002 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step; overridden below if another device remains. */
7004 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume iterating the resource list from where the previous
 * invocation left off (u.res tracks the cursor). */
7006 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7007 if (!ipr_is_scsi_disk(res))
7010 ipr_cmd->u.res = res;
7011 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7013 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7014 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7015 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7017 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7018 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7019 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7020 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7022 ipr_init_ioadl(ipr_cmd,
7023 ioa_cfg->vpd_cbs_dma +
7024 offsetof(struct ipr_misc_cbs, supp_dev),
7025 sizeof(struct ipr_supported_device),
7026 IPR_IOADL_FLAGS_WRITE_LAST);
7028 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7029 IPR_SET_SUP_DEVICE_TIMEOUT);
/* On pre-sis64 adapters, run this step again for the next disk. */
7031 if (!ioa_cfg->sis64)
7032 ipr_cmd->job_step = ipr_set_supported_devs;
7034 return IPR_RC_JOB_RETURN;
7038 return IPR_RC_JOB_CONTINUE;
7042 * ipr_get_mode_page - Locate specified mode page
7043 * @mode_pages: mode page buffer
7044 * @page_code: page code to find
7045 * @len: minimum required length for mode page
7048 * pointer to mode page / NULL on failure
7050 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7051 u32 page_code, u32 len)
7053 struct ipr_mode_page_hdr *mode_hdr;
7057 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Walkable payload length: total reported length minus the mode
 * parameter header and block descriptors. */
7060 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7061 mode_hdr = (struct ipr_mode_page_hdr *)
7062 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Scan page headers until the requested code is found with at
 * least the caller's minimum payload length. */
7065 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7066 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7070 page_length = (sizeof(struct ipr_mode_page_hdr) +
7071 mode_hdr->page_length);
7072 length -= page_length;
7073 mode_hdr = (struct ipr_mode_page_hdr *)
7074 ((unsigned long)mode_hdr + page_length);
7081 * ipr_check_term_power - Check for term power errors
7082 * @ioa_cfg: ioa config struct
7083 * @mode_pages: IOAFP mode pages buffer
7085 * Check the IOAFP's mode page 28 for term power errors
7090 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7091 struct ipr_mode_pages *mode_pages)
7095 struct ipr_dev_bus_entry *bus;
7096 struct ipr_mode_page28 *mode_page;
7098 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7099 sizeof(struct ipr_mode_page28));
7101 entry_length = mode_page->entry_length;
7103 bus = mode_page->bus;
/* Walk the per-bus entries (variable entry_length stride) and log
 * any bus reporting missing termination power. */
7105 for (i = 0; i < mode_page->num_entries; i++) {
7106 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7107 dev_err(&ioa_cfg->pdev->dev,
7108 "Term power is absent on scsi bus %d\n",
7112 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7117 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7118 * @ioa_cfg: ioa config struct
7120 * Looks through the config table checking for SES devices. If
7121 * the SES device is in the SES table indicating a maximum SCSI
7122 * bus speed, the speed is limited for the bus.
static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		/*
		 * Ask the SES table for the fastest speed this bus may run
		 * given its configured width.
		 */
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
						       ioa_cfg->bus_attr[i].bus_width);

		/* Only ever clamp downward; never raise the configured rate */
		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7142 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7143 * @ioa_cfg: ioa config struct
7144 * @mode_pages: mode page 28 buffer
7146 * Updates mode page 28 based on driver configuration
7151 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7152 struct ipr_mode_pages *mode_pages)
7154 int i, entry_length;
7155 struct ipr_dev_bus_entry *bus;
7156 struct ipr_bus_attributes *bus_attr;
7157 struct ipr_mode_page28 *mode_page;
7159 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7160 sizeof(struct ipr_mode_page28));
7162 entry_length = mode_page->entry_length;
7164 /* Loop for each device bus entry */
7165 for (i = 0, bus = mode_page->bus;
7166 i < mode_page->num_entries;
7167 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7168 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7169 dev_err(&ioa_cfg->pdev->dev,
7170 "Invalid resource address reported: 0x%08X\n",
7171 IPR_GET_PHYS_LOC(bus->res_addr));
7175 bus_attr = &ioa_cfg->bus_attr[i];
7176 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7177 bus->bus_width = bus_attr->bus_width;
7178 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7179 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7180 if (bus_attr->qas_enabled)
7181 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7183 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7188 * ipr_build_mode_select - Build a mode select command
7189 * @ipr_cmd: ipr command struct
7190 * @res_handle: resource handle to send command to
7191 * @parm: Byte 2 of Mode Sense command
7192 * @dma_addr: DMA buffer address
7193 * @xfer_len: data transfer length
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	/* Mode Select transfers data host -> adapter */
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;	/* caller-supplied CDB parameter byte */
	ioarcb->cmd_pkt.cdb[4] = xfer_len;	/* allocation/parameter list length */

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7215 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7216 * @ipr_cmd: ipr command struct
7218 * This function sets up the SCSI bus attributes and sends
7219 * a Mode Select for Page 28 to activate them.
static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
	/* Apply SES speed limits and the driver's bus settings to the page */
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	/* hdr.length excludes itself (+1); the field must be 0 on Mode Select */
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;
	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
	/* Next job step: tell the IOA which attached devices are supported */
	ipr_cmd->job_step = ipr_set_supported_devs;
	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
				    struct ipr_resource_entry, queue);
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
	return IPR_RC_JOB_RETURN;
7251 * ipr_build_mode_sense - Builds a mode sense command
7252 * @ipr_cmd: ipr command struct
7253 * @res: resource entry struct
7254 * @parm: Byte 2 of mode sense command
7255 * @dma_addr: DMA address of mode sense buffer
7256 * @xfer_len: Size of DMA buffer
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
	ioarcb->cmd_pkt.cdb[2] = parm;	/* page code / PC field byte */
	ioarcb->cmd_pkt.cdb[4] = xfer_len;	/* allocation length */
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;

	/* Mode Sense transfers data adapter -> host */
	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7277 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7278 * @ipr_cmd: ipr command struct
7280 * This function handles the failure of an IOA bringup command.
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/* Log the failing CDB opcode and its IOASC completion status */
	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	/* Bringup command failed: restart the whole adapter reset */
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	/* Return the command block to the free queue */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
7300 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7301 * @ipr_cmd: ipr command struct
7303 * This function handles the failure of a Mode Sense to the IOAFP.
7304 * Some adapters do not handle all mode pages.
7307 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * "Invalid request" just means this adapter does not implement the
	 * mode page: skip the mode select and go straight to setting
	 * supported devices.
	 */
	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_set_supported_devs;
		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
					    struct ipr_resource_entry, queue);
		return IPR_RC_JOB_CONTINUE;

	/* Any other failure is treated as a fatal bringup error */
	return ipr_reset_cmd_failed(ipr_cmd);
7325 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7326 * @ipr_cmd: ipr command struct
7328 * This function send a Page 28 mode sense to the IOA to
7329 * retrieve SCSI bus attributes.
static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Read page 0x28 into the shared vpd_cbs DMA buffer */
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x28, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	/* On success modify+select the page; on failure maybe skip it */
	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
7354 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7355 * @ipr_cmd: ipr command struct
7357 * This function enables dual IOA RAID support if possible.
7362 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7364 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7365 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7366 struct ipr_mode_page24 *mode_page;
7370 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7371 sizeof(struct ipr_mode_page24));
7374 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7376 length = mode_pages->hdr.length + 1;
7377 mode_pages->hdr.length = 0;
7379 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7380 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7383 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7384 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7387 return IPR_RC_JOB_RETURN;
7391 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7392 * @ipr_cmd: ipr command struct
7394 * This function handles the failure of a Mode Sense to the IOAFP.
7395 * Some adapters do not handle all mode pages.
7398 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/* Page 0x24 is optional; skip straight to page 0x28 if unsupported */
	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
		return IPR_RC_JOB_CONTINUE;

	/* Anything else is a real bringup failure */
	return ipr_reset_cmd_failed(ipr_cmd);
7413 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7414 * @ipr_cmd: ipr command struct
7416 * This function send a mode sense to the IOA to retrieve
7417 * the IOA Advanced Function Control mode page.
static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Read page 0x24 (Advanced Function Control) into vpd_cbs */
	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
			     0x24, ioa_cfg->vpd_cbs_dma +
			     offsetof(struct ipr_misc_cbs, mode_pages),
			     sizeof(struct ipr_mode_pages));

	/* Success -> select page 24; failure handler may skip this page */
	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
7442 * ipr_init_res_table - Initialize the resource table
7443 * @ipr_cmd: ipr command struct
7445 * This function looks through the existing resource table, comparing
7446 * it with the config table. This function will take care of old/new
7447 * devices and schedule adding/removing them from the mid-layer
7451 * IPR_RC_JOB_CONTINUE
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res, *temp;
	struct ipr_config_table_entry_wrapper cfgtew;
	int entries, found, flag, i;
	/* Header flags live in different layouts for 64-bit vs 32-bit SIS */
	flag = ioa_cfg->u.cfg_table64->hdr64.flags;
	flag = ioa_cfg->u.cfg_table->hdr.flags;
	if (flag & IPR_UCODE_DOWNLOAD_REQ)
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
	/* Stage every known resource on old_res; survivors move back below */
	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
		list_move_tail(&res->queue, &old_res);
	entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
	entries = ioa_cfg->u.cfg_table->hdr.num_entries;
	/* Walk the freshly fetched config table entry by entry */
	for (i = 0; i < entries; i++) {
		cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
		cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
		/* Existing device: move it back onto the used queue */
		list_for_each_entry_safe(res, temp, &old_res, queue) {
			if (ipr_is_same_device(res, &cfgtew)) {
				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		/* New device: claim a free resource entry for it */
		if (list_empty(&ioa_cfg->free_res_q)) {
			dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);
		list_move_tail(&res->queue, &ioa_cfg->used_res_q);
		ipr_init_res_entry(res, &cfgtew);
	} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
		/* Device survived the reset; let the mid-layer restart it */
		res->sdev->allow_restart = 1;
	/* Refresh the entry from the new config table data */
	ipr_update_res_entry(res, &cfgtew);
	/* Leftovers on old_res disappeared from the config table */
	list_for_each_entry_safe(res, temp, &old_res, queue) {
		res->del_from_ml = 1;
		res->res_handle = IPR_INVALID_RES_HANDLE;
		list_move_tail(&res->queue, &ioa_cfg->used_res_q);
	list_for_each_entry_safe(res, temp, &old_res, queue) {
		ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	/* Only probe page 0x24 when dual IOA RAID is possible and enabled */
	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	return IPR_RC_JOB_CONTINUE;
7535 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7536 * @ipr_cmd: ipr command struct
7538 * This function sends a Query IOA Configuration command
7539 * to the adapter to retrieve the IOA configuration table.
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
	/* Capability inquiry (page 0xD0) ran earlier; latch dual RAID support */
	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
		ioa_cfg->dual_raid = 1;
	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
		 ucode_vpd->major_release, ucode_vpd->card_type,
		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
	/* CDB bytes 6-8: big-endian 24-bit allocation length */
	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;

	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
		       IPR_IOADL_FLAGS_READ_LAST);

	/* Once the table arrives, reconcile it with our resource entries */
	ipr_cmd->job_step = ipr_init_res_table;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);

	return IPR_RC_JOB_RETURN;
7577 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7578 * @ipr_cmd: ipr command struct
7580 * This utility function sends an inquiry to the adapter.
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;	/* EVPD bit selects VPD pages */
	ioarcb->cmd_pkt.cdb[2] = page;	/* VPD page code */
	ioarcb->cmd_pkt.cdb[4] = xfer_len;	/* allocation length */

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	/* Fire and resume the reset job when the inquiry completes */
	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7606 * ipr_inquiry_page_supported - Is the given inquiry page supported
7607 * @page0: inquiry page 0 buffer
7610 * This function determines if the specified inquiry page is supported.
7613 * 1 if page is supported / 0 if not
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
	/* Scan the supported-pages list, bounded by the buffer capacity */
	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
7627 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7628 * @ipr_cmd: ipr command struct
7630 * This function sends a Page 0xD0 inquiry to the adapter
7631 * to retrieve adapter capabilities.
7634 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
	/* Next step either way; cap stays all-zero if page 0xD0 is absent */
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
				  sizeof(struct ipr_inquiry_cap));
		return IPR_RC_JOB_RETURN;

	/* Page not supported: skip the inquiry and continue the job */
	return IPR_RC_JOB_CONTINUE;
7658 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7659 * @ipr_cmd: ipr command struct
7661 * This function sends a Page 3 inquiry to the adapter
7662 * to retrieve software VPD information.
7665 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* After page 3 (firmware VPD), probe the capabilities page */
	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
			  sizeof(struct ipr_inquiry_page3));

	return IPR_RC_JOB_RETURN;
7684 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7685 * @ipr_cmd: ipr command struct
7687 * This function sends a Page 0 inquiry to the adapter
7688 * to retrieve supported inquiry pages.
7691 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	/* Product id holds the adapter type as 4 hex digits */
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

		/* ipr_testmode lets development hardware continue anyway */
		if (!ipr_testmode) {
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue,
				      &ioa_cfg->hrrq->hrrq_free_q);
			return IPR_RC_JOB_RETURN;

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	/* Fetch the supported-VPD-pages list (EVPD page 0) */
	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	return IPR_RC_JOB_RETURN;
7729 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7730 * @ipr_cmd: ipr command struct
7732 * This function sends a standard inquiry to the adapter.
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Standard (non-EVPD) inquiry first; page 0 inquiry follows */
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
7753 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7754 * @ipr_cmd: ipr command struct
7756 * This function send an Identify Host Request Response Queue
7757 * command to establish the HRRQ with the adapter.
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_hrr_queue *hrrq;
	/* Default next step once every HRRQ has been identified */
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	/* One Identify HRRQ command is issued per response queue */
	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];

		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ioarcb->cmd_pkt.cdb[1] = 0x1;

		/* Queue selection is only meaningful with multiple vectors */
		if (ioa_cfg->nvectors == 1)
			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;

		/* CDB bytes 2-5: low 32 bits of the HRRQ DMA address */
		ioarcb->cmd_pkt.cdb[2] =
			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
		ioarcb->cmd_pkt.cdb[3] =
			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
		ioarcb->cmd_pkt.cdb[4] =
			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[5] =
			((u64) hrrq->host_rrq_dma) & 0xff;
		/* CDB bytes 7-8: queue size in bytes (u32 entries) */
		ioarcb->cmd_pkt.cdb[7] =
			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] =
			(sizeof(u32) * hrrq->size) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[9] =
				ioa_cfg->identify_hrrq_index;

		/* 64-bit SIS: CDB bytes 10-13 carry the upper address bits */
		if (ioa_cfg->sis64) {
			ioarcb->cmd_pkt.cdb[10] =
				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
			ioarcb->cmd_pkt.cdb[11] =
				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
			ioarcb->cmd_pkt.cdb[12] =
				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
			ioarcb->cmd_pkt.cdb[13] =
				((u64) hrrq->host_rrq_dma >> 32) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[14] =
				ioa_cfg->identify_hrrq_index;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_INTERNAL_TIMEOUT);

		/* More queues left: re-enter this step for the next one */
		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

		return IPR_RC_JOB_RETURN;

	return IPR_RC_JOB_CONTINUE;
7834 * ipr_reset_timer_done - Adapter reset timer function
7835 * @ipr_cmd: ipr command struct
7837 * Description: This function is used in adapter reset processing
7838 * for timing events. If the reset_cmd pointer in the IOA
7839 * config struct is not this adapter's we are doing nested
7840 * resets and fail_all_ops will take care of freeing the
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/*
	 * Only resume the job if this command still drives the reset; on a
	 * nested reset, fail_all_ops owns cleanup of this command instead.
	 */
	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7862 * ipr_reset_start_timer - Start a timer for adapter reset job
7863 * @ipr_cmd: ipr command struct
7864 * @timeout: timeout value
7866 * Description: This function is used in adapter reset processing
7867 * for timing events. If the reset_cmd pointer in the IOA
7868 * config struct is not this adapter's we are doing nested
7869 * resets and fail_all_ops will take care of freeing the
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
	/* Park the command on the pending queue while the timer runs */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	/* ipr_reset_timer_done re-enters the reset job after @timeout */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
7890 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7891 * @ioa_cfg: ioa cfg struct
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
	struct ipr_hrr_queue *hrrq;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		/* Toggle bit distinguishes new entries after queue wrap */
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);

	ioa_cfg->identify_hrrq_index = 0;
	/* With multiple queues, queue 0 is reserved; start dispatch at 1 */
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7924 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7925 * @ipr_cmd: ipr command struct
7928 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
	unsigned long stage, stage_time;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Feedback register encodes both the IPL stage and its time budget */
	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		/* Old firmware: mask stage-change and fall back to transop wait */
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			/* Adapter is operational: mask both and continue now */
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;

	/* Not ready yet: arm an oper timeout and wait on the pending queue */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
7981 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7982 * @ipr_cmd: ipr command struct
7984 * This function reinitializes some control blocks and
7985 * enables destructive diagnostics on the adapter.
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	/* Re-initialize HRRQ state and zero the config table */
	ipr_init_ioa_mem(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	/* Already operational: unmask and continue without waiting */
	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	/* sis64 reports IPL progress; poll stages instead of a flat timeout */
	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;

	/* sis32: arm the transition-to-operational timeout and wait */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
8053 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8054 * @ipr_cmd: ipr command struct
8056 * This function is invoked when an adapter dump has run out
8057 * of processing time.
8060 * IPR_RC_JOB_CONTINUE
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* Demote the dump state: not started -> wait, in progress -> abort */
	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	/* Remember the dump timed out, then alert the adapter to reset */
	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
8078 * ipr_unit_check_no_data - Log a unit check/no data error log
8079 * @ioa_cfg: ioa config struct
8081 * Logs an error indicating the adapter unit checked, but for some
8082 * reason, we were unable to fetch the unit check buffer.
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
	/* Count and report the unit check even though no buffer was fetched */
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8094 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8095 * @ioa_cfg: ioa config struct
8097 * Fetches the unit check buffer from the adapter by clocking the data
8098 * through the mailbox register.
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	/* Mailbox points at the smart dump table describing the UC buffer */
	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	/* SDT must be readable, valid, and in a ready-to-use state */
	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
		/* FMT2 encodes length as (end - start) within the mailbox mask */
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			 IPR_FMT2_MBX_ADDR_MASK;

	/* Borrow a free host RCB to hold the fetched error data */
	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	ipr_handle_log_data(ioa_cfg, hostrcb);
	ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
	/* Some errors demand a dump; defer it until the reset allows one */
	if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
	    ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8160 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8161 * @ipr_cmd: ipr command struct
8163 * Description: This function will call to get the unit check buffer.
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Clear the flag before fetching so a new unit check is not lost */
	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	/* Zero timeout: re-enter the reset job on the next timer tick */
	ipr_reset_start_timer(ipr_cmd, 0);

	return IPR_RC_JOB_RETURN;
8183 * ipr_reset_restore_cfg_space - Restore PCI config space.
8184 * @ipr_cmd: ipr command struct
8186 * Description: This function restores the saved PCI config space of
8187 * the adapter, fails all outstanding ops back to the callers, and
8188 * fetches the dump/unit check if applicable to this reset.
8191 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	/* Mark state saved so pci_restore_state() actually restores it */
	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;

	/* Fail every op outstanding across the reset back to its caller */
	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			/* sis64: delay before fetching the unit check buffer */
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		ioa_cfg->ioa_unit_checked = 0;
		ipr_get_unit_check_buffer(ioa_cfg);
		ipr_cmd->job_step = ipr_reset_alert;
		ipr_reset_start_timer(ipr_cmd, 0);
		return IPR_RC_JOB_RETURN;

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
		ipr_cmd->job_step = ipr_reset_enable_ioa;

		/* A dump was requested: switch to reading it with a deadline */
		if (GET_DUMP == ioa_cfg->sdt_state) {
			ioa_cfg->sdt_state = READ_DUMP;
			ioa_cfg->dump_timeout = 0;
				ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
				ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
			ipr_cmd->job_step = ipr_reset_wait_for_dump;
			/* work_q thread performs the actual dump read */
			schedule_work(&ioa_cfg->work_q);
			return IPR_RC_JOB_RETURN;

	return IPR_RC_JOB_CONTINUE;
8252 * ipr_reset_bist_done - BIST has completed on the adapter.
8253 * @ipr_cmd: ipr command struct
8255 * Description: Unblock config space and resume the reset process.
8258 * IPR_RC_JOB_CONTINUE
8260 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8262 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Release the config-space access lock taken before BIST was started. */
8265 if (ioa_cfg->cfg_locked)
8266 pci_cfg_access_unlock(ioa_cfg->pdev);
8267 ioa_cfg->cfg_locked = 0;
/* Next step: restore the saved PCI config space. */
8268 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8270 return IPR_RC_JOB_CONTINUE;
8274 * ipr_reset_start_bist - Run BIST on the adapter.
8275 * @ipr_cmd: ipr command struct
8277 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8280 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8282 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8284 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8285 int rc = PCIBIOS_SUCCESSFUL;
/* Chips with IPR_MMIO start BIST via an MMIO doorbell; others via PCI config space. */
8288 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8289 writel(IPR_UPROCI_SIS64_START_BIST,
8290 ioa_cfg->regs.set_uproc_interrupt_reg32);
8292 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8294 if (rc == PCIBIOS_SUCCESSFUL) {
/* BIST started: wait IPR_WAIT_FOR_BIST_TIMEOUT before continuing. */
8295 ipr_cmd->job_step = ipr_reset_bist_done;
8296 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8297 rc = IPR_RC_JOB_RETURN;
/* Could not start BIST: drop the config lock and fail this job step. */
8299 if (ioa_cfg->cfg_locked)
8300 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8301 ioa_cfg->cfg_locked = 0;
8302 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8303 rc = IPR_RC_JOB_CONTINUE;
8311 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8312 * @ipr_cmd: ipr command struct
8314 * Description: This clears PCI reset to the adapter and delays two seconds.
8319 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
/* Reuse the post-BIST path: wait out the settle time, then restore config space. */
8322 ipr_cmd->job_step = ipr_reset_bist_done;
8323 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8325 return IPR_RC_JOB_RETURN;
8329 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8330 * @work: work struct
8332 * Description: This pulses warm reset to a slot.
8335 static void ipr_reset_reset_work(struct work_struct *work)
8337 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8338 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8339 struct pci_dev *pdev = ioa_cfg->pdev;
8340 unsigned long lock_flags = 0;
/* Assert warm reset, hold it for the reset timeout, then deassert.
 * Runs from a workqueue because msleep() is not allowed in the job-step path. */
8343 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8344 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8345 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
/* Resume the reset job only if this command is still the active reset. */
8347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8348 if (ioa_cfg->reset_cmd == ipr_cmd)
8349 ipr_reset_ioa_job(ipr_cmd);
8350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8355 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8356 * @ipr_cmd: ipr command struct
8358 * Description: This asserts PCI reset to the adapter.
8363 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8365 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* The reset pulse sleeps, so hand it off to the dedicated reset workqueue. */
8368 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8369 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8370 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8372 return IPR_RC_JOB_RETURN;
8376 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8377 * @ipr_cmd: ipr command struct
8379 * Description: This attempts to block config access to the IOA.
8382 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8384 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8387 int rc = IPR_RC_JOB_CONTINUE;
/* Got the config-access lock: proceed to the chip's reset method. */
8389 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8390 ioa_cfg->cfg_locked = 1;
8391 ipr_cmd->job_step = ioa_cfg->reset;
/* Lock unavailable: retry until the wait budget (u.time_left) runs out. */
8393 if (ipr_cmd->u.time_left) {
8394 rc = IPR_RC_JOB_RETURN;
8395 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8396 ipr_reset_start_timer(ipr_cmd,
8397 IPR_CHECK_FOR_RESET_TIMEOUT);
/* Budget exhausted: reset anyway without holding the config lock. */
8399 ipr_cmd->job_step = ioa_cfg->reset;
8400 dev_err(&ioa_cfg->pdev->dev,
8401 "Timed out waiting to lock config access. Resetting anyway.\n");
8409 * ipr_reset_block_config_access - Block config access to the IOA
8410 * @ipr_cmd: ipr command struct
8412 * Description: This attempts to block config access to the IOA
8415 * IPR_RC_JOB_CONTINUE
8417 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
/* Begin the config-access blocking step with a fresh wait budget. */
8419 ipr_cmd->ioa_cfg->cfg_locked = 0;
8420 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8421 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8422 return IPR_RC_JOB_CONTINUE;
8426 * ipr_reset_allowed - Query whether or not IOA can be reset
8427 * @ioa_cfg: ioa config struct
8430 * 0 if reset not allowed / non-zero if reset is allowed
8432 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8434 volatile u32 temp_reg;
8436 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Reset is permitted once the IOA is no longer in critical operation. */
8437 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8441 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8442 * @ipr_cmd: ipr command struct
8444 * Description: This function waits for adapter permission to run BIST,
8445 * then runs BIST. If the adapter does not give permission after a
8446 * reasonable time, we will reset the adapter anyway. The impact of
8447 * resetting the adapter without warning the adapter is the risk of
8448 * losing the persistent error log on the adapter. If the adapter is
8449 * reset while it is writing to the flash on the adapter, the flash
8450 * segment will have bad ECC and be zeroed.
8453 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8455 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8458 int rc = IPR_RC_JOB_RETURN;
/* Keep polling while the IOA denies reset and wait budget remains. */
8460 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8461 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8462 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
/* Permission granted, or we gave up waiting: reset anyway. */
8464 ipr_cmd->job_step = ipr_reset_block_config_access;
8465 rc = IPR_RC_JOB_CONTINUE;
8472 * ipr_reset_alert - Alert the adapter of a pending reset
8473 * @ipr_cmd: ipr command struct
8475 * Description: This function alerts the adapter that it will be reset.
8476 * If memory space is not currently enabled, proceed directly
8477 * to running BIST on the adapter. The timer must always be started
8478 * so we guarantee we do not run BIST from ipr_isr.
8483 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8485 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8490 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
/* Alert the IOA only if memory space is enabled; otherwise skip straight
 * to blocking config access and resetting. */
8492 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8493 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8494 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8495 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8497 ipr_cmd->job_step = ipr_reset_block_config_access;
/* Always start the timer so BIST is never run from interrupt context. */
8500 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8501 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8504 return IPR_RC_JOB_RETURN;
8508 * ipr_reset_quiesce_done - Complete IOA disconnect
8509 * @ipr_cmd: ipr command struct
8511 * Description: Freeze the adapter to complete quiesce processing
8514 * IPR_RC_JOB_CONTINUE
8516 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
/* Mask everything except the operational-transition interrupt, then finish bringdown. */
8521 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8522 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8524 return IPR_RC_JOB_CONTINUE;
8528 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8529 * @ipr_cmd: ipr command struct
8531 * Description: Ensure nothing is outstanding to the IOA and
8532 * proceed with IOA disconnect. Otherwise reset the IOA.
8535 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8537 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8539 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8540 struct ipr_cmnd *loop_cmd;
8541 struct ipr_hrr_queue *hrrq;
8542 int rc = IPR_RC_JOB_CONTINUE;
8546 ipr_cmd->job_step = ipr_reset_quiesce_done;
/* If any command is still pending on any HRRQ, the quiesce cannot complete:
 * fall back to a full IOA reset and return this command to the free list. */
8548 for_each_hrrq(hrrq, ioa_cfg) {
8549 spin_lock(&hrrq->_lock);
8550 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8552 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8553 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8554 rc = IPR_RC_JOB_RETURN;
8557 spin_unlock(&hrrq->_lock);
8568 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8569 * @ipr_cmd: ipr command struct
8571 * Description: Cancel any outstanding HCAMs to the IOA.
8574 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8576 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8578 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8579 int rc = IPR_RC_JOB_CONTINUE;
8580 struct ipr_cmd_pkt *cmd_pkt;
8581 struct ipr_cmnd *hcam_cmd;
8582 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8585 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8587 if (!hrrq->ioa_is_dead) {
8588 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
/* Find the next pending HCAM on the initial HRRQ and issue a cancel for it. */
8589 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8590 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8593 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8594 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8595 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
/* NOTE(review): request_type was already set two lines above; this repeat looks redundant. */
8596 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8597 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8598 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
/* 64-bit IOARCB address of the HCAM being cancelled, big-endian:
 * high 32 bits in cdb[10..13], low 32 bits in cdb[2..5]. */
8599 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8600 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8601 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8602 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8603 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8604 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8605 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8606 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8608 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8609 IPR_CANCEL_TIMEOUT);
/* Re-enter this step after the cancel completes, to cancel any remaining HCAMs. */
8611 rc = IPR_RC_JOB_RETURN;
8612 ipr_cmd->job_step = ipr_reset_cancel_hcam;
/* IOA dead or nothing pending: proceed to the reset-alert step. */
8617 ipr_cmd->job_step = ipr_reset_alert;
8624 * ipr_reset_ucode_download_done - Microcode download completion
8625 * @ipr_cmd: ipr command struct
8627 * Description: This function unmaps the microcode download buffer.
8630 * IPR_RC_JOB_CONTINUE
8632 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8634 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8635 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* Tear down the DMA mapping created for the WRITE BUFFER microcode download. */
8637 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8638 sglist->num_sg, DMA_TO_DEVICE);
8640 ipr_cmd->job_step = ipr_reset_alert;
8641 return IPR_RC_JOB_CONTINUE;
8645 * ipr_reset_ucode_download - Download microcode to the adapter
8646 * @ipr_cmd: ipr command struct
8648 * Description: This function checks to see if there is microcode
8649 * to download to the adapter. If there is, a download is performed.
8652 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8654 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8656 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8657 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
/* No microcode staged: skip directly to the reset-alert step. */
8660 ipr_cmd->job_step = ipr_reset_alert;
8663 return IPR_RC_JOB_CONTINUE;
/* Build a SCSI WRITE BUFFER (download microcode and save) directed at the IOA. */
8665 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8666 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8667 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8668 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
/* 24-bit transfer length, most significant byte first, in cdb[6..8]. */
8669 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8670 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8671 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
/* Build the scatter/gather list in the format the chip generation expects. */
8674 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8676 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8677 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8679 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8680 IPR_WRITE_BUFFER_TIMEOUT);
8683 return IPR_RC_JOB_RETURN;
8687 * ipr_reset_shutdown_ioa - Shutdown the adapter
8688 * @ipr_cmd: ipr command struct
8690 * Description: This function issues an adapter shutdown of the
8691 * specified type to the specified adapter as part of the
8692 * adapter reset job.
8695 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8697 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8699 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8700 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8701 unsigned long timeout;
8702 int rc = IPR_RC_JOB_CONTINUE;
/* A quiesce request cancels outstanding HCAMs instead of shutting the IOA down. */
8705 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8706 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8707 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8708 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
/* Issue an IOA shutdown command of the requested type. */
8709 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8710 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8711 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8712 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
/* Choose a timeout appropriate to the shutdown flavor. */
8714 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8715 timeout = IPR_SHUTDOWN_TIMEOUT;
8716 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8717 timeout = IPR_INTERNAL_TIMEOUT;
8718 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8719 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8721 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8723 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
/* After the shutdown completes, the next step is the microcode download check. */
8725 rc = IPR_RC_JOB_RETURN;
8726 ipr_cmd->job_step = ipr_reset_ucode_download;
/* No shutdown needed, or IOA already dead: go straight to the reset alert. */
8728 ipr_cmd->job_step = ipr_reset_alert;
8735 * ipr_reset_ioa_job - Adapter reset job
8736 * @ipr_cmd: ipr command struct
8738 * Description: This function is the job router for the adapter reset job.
8743 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8746 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8749 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
/* Nested reset: this command is no longer the active reset job, so just free it. */
8751 if (ioa_cfg->reset_cmd != ipr_cmd) {
8753 * We are doing nested adapter resets and this is
8754 * not the current reset job.
8756 list_add_tail(&ipr_cmd->queue,
8757 &ipr_cmd->hrrq->hrrq_free_q);
/* A failed step may substitute its own recovery step via job_step_failed. */
8761 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8762 rc = ipr_cmd->job_step_failed(ipr_cmd);
8763 if (rc == IPR_RC_JOB_RETURN)
/* Run job steps back-to-back until one needs to wait (JOB_RETURN). */
8767 ipr_reinit_ipr_cmnd(ipr_cmd);
8768 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8769 rc = ipr_cmd->job_step(ipr_cmd);
8770 } while (rc == IPR_RC_JOB_CONTINUE);
8774 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8775 * @ioa_cfg: ioa config struct
8776 * @job_step: first job step of reset job
8777 * @shutdown_type: shutdown type
8779 * Description: This function will initiate the reset of the given adapter
8780 * starting at the selected job step.
8781 * If the caller needs to wait on the completion of the reset,
8782 * the caller must sleep on the reset_wait_q.
8787 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8788 int (*job_step) (struct ipr_cmnd *),
8789 enum ipr_shutdown_type shutdown_type)
8791 struct ipr_cmnd *ipr_cmd;
/* Stop accepting new commands on every HRRQ while the reset is in flight. */
8794 ioa_cfg->in_reset_reload = 1;
8795 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8796 spin_lock(&ioa_cfg->hrrq[i]._lock);
8797 ioa_cfg->hrrq[i].allow_cmds = 0;
8798 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Don't block the midlayer if the adapter is being removed. */
8801 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8802 scsi_block_requests(ioa_cfg->host);
/* Dedicate a command block to the reset job and start the state machine. */
8804 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8805 ioa_cfg->reset_cmd = ipr_cmd;
8806 ipr_cmd->job_step = job_step;
8807 ipr_cmd->u.shutdown_type = shutdown_type;
8809 ipr_reset_ioa_job(ipr_cmd);
8813 * ipr_initiate_ioa_reset - Initiate an adapter reset
8814 * @ioa_cfg: ioa config struct
8815 * @shutdown_type: shutdown type
8817 * Description: This function will initiate the reset of the given adapter.
8818 * If the caller needs to wait on the completion of the reset,
8819 * the caller must sleep on the reset_wait_q.
8824 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8825 enum ipr_shutdown_type shutdown_type)
/* Nothing to do for an adapter already declared dead. */
8829 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
/* A reset is already in progress: only adjust the dump state machine. */
8832 if (ioa_cfg->in_reset_reload) {
8833 if (ioa_cfg->sdt_state == GET_DUMP)
8834 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8835 else if (ioa_cfg->sdt_state == READ_DUMP)
8836 ioa_cfg->sdt_state = ABORT_DUMP;
/* Too many consecutive reset attempts: take the adapter offline for good. */
8839 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8840 dev_err(&ioa_cfg->pdev->dev,
8841 "IOA taken offline - error recovery failed\n");
8843 ioa_cfg->reset_retries = 0;
8844 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8845 spin_lock(&ioa_cfg->hrrq[i]._lock);
8846 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8847 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* If a bringdown was in progress, complete it now and wake any waiters. */
8851 if (ioa_cfg->in_ioa_bringdown) {
8852 ioa_cfg->reset_cmd = NULL;
8853 ioa_cfg->in_reset_reload = 0;
8854 ipr_fail_all_ops(ioa_cfg);
8855 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock around scsi_unblock_requests() - it may recurse into
 * the queuecommand path. */
8857 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8858 spin_unlock_irq(ioa_cfg->host->host_lock);
8859 scsi_unblock_requests(ioa_cfg->host);
8860 spin_lock_irq(ioa_cfg->host->host_lock);
/* Not yet in bringdown: begin one now, skipping the shutdown command. */
8864 ioa_cfg->in_ioa_bringdown = 1;
8865 shutdown_type = IPR_SHUTDOWN_NONE;
8869 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8874 * ipr_reset_freeze - Hold off all I/O activity
8875 * @ipr_cmd: ipr command struct
8877 * Description: If the PCI slot is frozen, hold off all I/O
8878 * activity; then, as soon as the slot is available again,
8879 * initiate an adapter reset.
8881 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8883 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8886 /* Disallow new interrupts, avoid loop */
8887 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8888 spin_lock(&ioa_cfg->hrrq[i]._lock);
8889 ioa_cfg->hrrq[i].allow_interrupts = 0;
8890 spin_unlock(&ioa_cfg->hrrq[i]._lock);
/* Park the reset command on the pending queue; its done() handler resumes
 * the reset job once the PCI slot becomes usable again. */
8893 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8894 ipr_cmd->done = ipr_reset_ioa_job;
8895 return IPR_RC_JOB_RETURN;
8899 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8900 * @pdev: PCI device struct
8902 * Description: This routine is called to tell us that the MMIO
8903 * access to the IOA has been restored
8905 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8907 unsigned long flags = 0;
8908 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8910 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* If probe has not finished yet, capture config space for the upcoming reset. */
8911 if (!ioa_cfg->probe_done)
8912 pci_save_state(pdev);
8913 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* Always ask the EEH core for a slot reset. */
8914 return PCI_ERS_RESULT_NEED_RESET;
8918 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8919 * @pdev: PCI device struct
8921 * Description: This routine is called to tell us that the PCI bus
8922 * is down. Can't do anything here, except put the device driver
8923 * into a holding pattern, waiting for the PCI bus to come back.
8925 static void ipr_pci_frozen(struct pci_dev *pdev)
8927 unsigned long flags = 0;
8928 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8930 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Freeze I/O via the reset state machine only once probe has completed;
 * the parked job resumes after the slot reset. */
8931 if (ioa_cfg->probe_done)
8932 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8937 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8938 * @pdev: PCI device struct
8940 * Description: This routine is called by the pci error recovery
8941 * code after the PCI slot has been reset, just before we
8942 * should resume normal operations.
8944 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8946 unsigned long flags = 0;
8947 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8949 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8950 if (ioa_cfg->probe_done) {
/* Chips that need a warm reset take the full reset path; others resume
 * by restoring the saved PCI config space. */
8951 if (ioa_cfg->needs_warm_reset)
8952 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8954 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
/* Release anyone waiting on EEH recovery when probe never completed. */
8957 wake_up_all(&ioa_cfg->eeh_wait_q);
8958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8959 return PCI_ERS_RESULT_RECOVERED;
8963 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8964 * @pdev: PCI device struct
8966 * Description: This routine is called when the PCI bus has
8967 * permanently failed.
8969 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8971 unsigned long flags = 0;
8972 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8975 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8976 if (ioa_cfg->probe_done) {
8977 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8978 ioa_cfg->sdt_state = ABORT_DUMP;
/* Force the next reset attempt to be the last one before going offline. */
8979 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8980 ioa_cfg->in_ioa_bringdown = 1;
8981 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8982 spin_lock(&ioa_cfg->hrrq[i]._lock);
8983 ioa_cfg->hrrq[i].allow_cmds = 0;
8984 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8987 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
/* Probe never finished: just release any EEH waiters. */
8989 wake_up_all(&ioa_cfg->eeh_wait_q);
8990 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8994 * ipr_pci_error_detected - Called when a PCI error is detected.
8995 * @pdev: PCI device struct
8996 * @state: PCI channel state
8998 * Description: Called when a PCI error is detected.
9001 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9003 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9004 pci_channel_state_t state)
9007 case pci_channel_io_frozen:
9008 ipr_pci_frozen(pdev);
9009 return PCI_ERS_RESULT_CAN_RECOVER;
9010 case pci_channel_io_perm_failure:
9011 ipr_pci_perm_failure(pdev);
9012 return PCI_ERS_RESULT_DISCONNECT;
/* Any other channel state requires a slot reset. */
9017 return PCI_ERS_RESULT_NEED_RESET;
9021 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9022 * @ioa_cfg: ioa cfg struct
9024 * Description: This is the second phase of adapter initialization
9025 * This function takes care of initializing the adapter to the point
9026 * where it can accept new commands.
9029 * 0 on success / -EIO on failure
9031 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9034 unsigned long host_lock_flags = 0;
9037 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9038 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9039 ioa_cfg->probe_done = 1;
/* First-phase probe may have flagged a hard reset; otherwise just enable the IOA. */
9040 if (ioa_cfg->needs_hard_reset) {
9041 ioa_cfg->needs_hard_reset = 0;
9042 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9044 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9046 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9053 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9054 * @ioa_cfg: ioa config struct
9059 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9063 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9064 if (ioa_cfg->ipr_cmnd_list[i])
9065 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9066 ioa_cfg->ipr_cmnd_list[i],
9067 ioa_cfg->ipr_cmnd_list_dma[i]);
9069 ioa_cfg->ipr_cmnd_list[i] = NULL;
9072 if (ioa_cfg->ipr_cmd_pool)
9073 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9075 kfree(ioa_cfg->ipr_cmnd_list);
9076 kfree(ioa_cfg->ipr_cmnd_list_dma);
9077 ioa_cfg->ipr_cmnd_list = NULL;
9078 ioa_cfg->ipr_cmnd_list_dma = NULL;
9079 ioa_cfg->ipr_cmd_pool = NULL;
9083 * ipr_free_mem - Frees memory allocated for an adapter
9084 * @ioa_cfg: ioa cfg struct
9089 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9093 kfree(ioa_cfg->res_entries);
9094 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9095 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9096 ipr_free_cmd_blks(ioa_cfg);
/* Free one host response queue buffer per HRR queue. */
9098 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9099 dma_free_coherent(&ioa_cfg->pdev->dev,
9100 sizeof(u32) * ioa_cfg->hrrq[i].size,
9101 ioa_cfg->hrrq[i].host_rrq,
9102 ioa_cfg->hrrq[i].host_rrq_dma);
9104 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9105 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
/* Free the host-controlled async message buffers. */
9107 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9108 dma_free_coherent(&ioa_cfg->pdev->dev,
9109 sizeof(struct ipr_hostrcb),
9110 ioa_cfg->hostrcb[i],
9111 ioa_cfg->hostrcb_dma[i]);
9114 ipr_free_dump(ioa_cfg);
9115 kfree(ioa_cfg->trace);
9119 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9120 * @ioa_cfg: ioa config struct
9122 * This function frees all allocated IRQs for the
9123 * specified adapter.
9128 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9130 struct pci_dev *pdev = ioa_cfg->pdev;
/* MSI/MSI-X registered one handler per vector; legacy INTx used pdev->irq. */
9132 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9133 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9135 for (i = 0; i < ioa_cfg->nvectors; i++)
9136 free_irq(ioa_cfg->vectors_info[i].vec,
9139 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
/* Disable the interrupt mode and clear its flag so teardown is idempotent. */
9141 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9142 pci_disable_msi(pdev);
9143 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9144 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9145 pci_disable_msix(pdev);
9146 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9151 * ipr_free_all_resources - Free all allocated resources for an adapter.
9152 * @ioa_cfg: ioa config struct
9154 * This function frees all allocated resources for the
9155 * specified adapter.
9160 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9162 struct pci_dev *pdev = ioa_cfg->pdev;
/* Tear down in reverse order of acquisition: IRQs, workqueue, MMIO, BARs, memory. */
9165 ipr_free_irqs(ioa_cfg);
9166 if (ioa_cfg->reset_work_q)
9167 destroy_workqueue(ioa_cfg->reset_work_q);
9168 iounmap(ioa_cfg->hdw_dma_regs);
9169 pci_release_regions(pdev);
9170 ipr_free_mem(ioa_cfg);
/* NOTE(review): ioa_cfg appears to live in the Scsi_Host's private data,
 * so the host reference must be dropped after all ioa_cfg use - confirm. */
9171 scsi_host_put(ioa_cfg->host);
9172 pci_disable_device(pdev);
9177 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9178 * @ioa_cfg: ioa config struct
9181 * 0 on success / -ENOMEM on allocation failure
9183 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9185 struct ipr_cmnd *ipr_cmd;
9186 struct ipr_ioarcb *ioarcb;
9187 dma_addr_t dma_addr;
9188 int i, entries_each_hrrq, hrrq_id = 0;
/* Command blocks come from a DMA pool with 512-byte alignment. */
9190 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9191 sizeof(struct ipr_cmnd), 512, 0);
9193 if (!ioa_cfg->ipr_cmd_pool)
9196 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9197 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL)
9199 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9200 ipr_free_cmd_blks(ioa_cfg);
/* Partition the command-id space among the HRR queues: with multiple queues,
 * queue 0 holds the internal command blocks and the remaining queues split
 * the base command blocks evenly. */
9204 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9205 if (ioa_cfg->hrrq_num > 1) {
9207 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9208 ioa_cfg->hrrq[i].min_cmd_id = 0;
9209 ioa_cfg->hrrq[i].max_cmd_id =
9210 (entries_each_hrrq - 1);
9213 IPR_NUM_BASE_CMD_BLKS/
9214 (ioa_cfg->hrrq_num - 1);
9215 ioa_cfg->hrrq[i].min_cmd_id =
9216 IPR_NUM_INTERNAL_CMD_BLKS +
9217 (i - 1) * entries_each_hrrq;
9218 ioa_cfg->hrrq[i].max_cmd_id =
9219 (IPR_NUM_INTERNAL_CMD_BLKS +
9220 i * entries_each_hrrq - 1);
/* Single queue: it owns the entire command-id range. */
9223 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9224 ioa_cfg->hrrq[i].min_cmd_id = 0;
9225 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9227 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9230 BUG_ON(ioa_cfg->hrrq_num == 0);
/* Hand any division leftovers to the last queue. */
9232 i = IPR_NUM_CMD_BLKS -
9233 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9235 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9236 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
/* Allocate and initialize every command block. */
9239 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9240 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9243 ipr_free_cmd_blks(ioa_cfg);
9247 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9248 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9249 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9251 ioarcb = &ipr_cmd->ioarcb;
9252 ipr_cmd->dma_addr = dma_addr;
/* SIS-64 IOARCBs carry 64-bit host addresses; SIS-32 carry 32-bit. */
9254 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9256 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9258 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9259 if (ioa_cfg->sis64) {
9260 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9261 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9262 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9263 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9265 ioarcb->write_ioadl_addr =
9266 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9267 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9268 ioarcb->ioasa_host_pci_addr =
9269 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9271 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9272 ipr_cmd->cmd_index = i;
9273 ipr_cmd->ioa_cfg = ioa_cfg;
9274 ipr_cmd->sense_buffer_dma = dma_addr +
9275 offsetof(struct ipr_cmnd, sense_buffer);
/* Bind the command to its HRRQ and put it on that queue's free list;
 * advance to the next queue once this one's id range is filled. */
9277 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9278 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9279 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9280 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9288 * ipr_alloc_mem - Allocate memory for an adapter
9289 * @ioa_cfg: ioa config struct
9292 * 0 on success / non-zero for error
9294 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9296 struct pci_dev *pdev = ioa_cfg->pdev;
9297 int i, rc = -ENOMEM;
/* Resource table: one entry per supported device, all starting on the free list. */
9300 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9301 ioa_cfg->max_devs_supported, GFP_KERNEL);
9303 if (!ioa_cfg->res_entries)
9306 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9307 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9308 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9311 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9312 sizeof(struct ipr_misc_cbs),
9313 &ioa_cfg->vpd_cbs_dma,
9316 if (!ioa_cfg->vpd_cbs)
9317 goto out_free_res_entries;
9319 if (ipr_alloc_cmd_blks(ioa_cfg))
9320 goto out_free_vpd_cbs;
/* One host response queue buffer per HRR queue; on partial failure,
 * free the queues allocated so far before unwinding further. */
9322 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9323 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9324 sizeof(u32) * ioa_cfg->hrrq[i].size,
9325 &ioa_cfg->hrrq[i].host_rrq_dma,
9328 if (!ioa_cfg->hrrq[i].host_rrq) {
9330 dma_free_coherent(&pdev->dev,
9331 sizeof(u32) * ioa_cfg->hrrq[i].size,
9332 ioa_cfg->hrrq[i].host_rrq,
9333 ioa_cfg->hrrq[i].host_rrq_dma);
9334 goto out_ipr_free_cmd_blocks;
9336 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9339 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9340 ioa_cfg->cfg_table_size,
9341 &ioa_cfg->cfg_table_dma,
9344 if (!ioa_cfg->u.cfg_table)
9345 goto out_free_host_rrq;
/* Host-controlled async message buffers. */
9347 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9348 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9349 sizeof(struct ipr_hostrcb),
9350 &ioa_cfg->hostrcb_dma[i],
9353 if (!ioa_cfg->hostrcb[i])
9354 goto out_free_hostrcb_dma;
/* DMA address of the HCAM payload embedded inside the hostrcb. */
9356 ioa_cfg->hostrcb[i]->hostrcb_dma =
9357 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9358 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9359 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9362 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9363 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9365 if (!ioa_cfg->trace)
9366 goto out_free_hostrcb_dma;
/* Error unwind: release everything in reverse order of allocation. */
9373 out_free_hostrcb_dma:
9375 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9376 ioa_cfg->hostrcb[i],
9377 ioa_cfg->hostrcb_dma[i]);
9379 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9380 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9382 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9383 dma_free_coherent(&pdev->dev,
9384 sizeof(u32) * ioa_cfg->hrrq[i].size,
9385 ioa_cfg->hrrq[i].host_rrq,
9386 ioa_cfg->hrrq[i].host_rrq_dma);
9388 out_ipr_free_cmd_blocks:
9389 ipr_free_cmd_blks(ioa_cfg);
9391 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9392 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9393 out_free_res_entries:
9394 kfree(ioa_cfg->res_entries);
9399 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9400 * @ioa_cfg: ioa config struct
9405 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9409 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9410 ioa_cfg->bus_attr[i].bus = i;
9411 ioa_cfg->bus_attr[i].qas_enabled = 0;
9412 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
/* Out-of-range ipr_max_speed module parameter falls back to the U160 rate. */
9413 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9414 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9416 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9421 * ipr_init_regs - Initialize IOA registers
9422 * @ioa_cfg: ioa config struct
9427 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9429 const struct ipr_interrupt_offsets *p;
9430 struct ipr_interrupts *t;
/* Translate chip-specific register offsets into absolute MMIO addresses
 * relative to the mapped register base. */
9433 p = &ioa_cfg->chip_cfg->regs;
9435 base = ioa_cfg->hdw_dma_regs;
9437 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9438 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9439 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9440 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9441 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9442 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9443 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9444 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9445 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9446 t->ioarrin_reg = base + p->ioarrin_reg;
9447 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9448 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9449 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9450 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9451 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9452 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
/* Registers that exist only on SIS-64 adapters. */
9454 if (ioa_cfg->sis64) {
9455 t->init_feedback_reg = base + p->init_feedback_reg;
9456 t->dump_addr_reg = base + p->dump_addr_reg;
9457 t->dump_data_reg = base + p->dump_data_reg;
9458 t->endian_swap_reg = base + p->endian_swap_reg;
9463 * ipr_init_ioa_cfg - Initialize IOA config struct
9464 * @ioa_cfg: ioa config struct
9465 * @host: scsi host struct
9466 * @pdev: PCI dev struct
/* First-stage software initialization of a zeroed ioa_cfg: labels, list
 * heads, wait queues, work item, bus attributes, SCSI host limits (which
 * differ between SIS-32 and SIS-64), config-table sizing, and per-HRRQ
 * locks. No hardware access happens here. */
9471 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9472 struct Scsi_Host *host, struct pci_dev *pdev)
9476 ioa_cfg->host = host;
9477 ioa_cfg->pdev = pdev;
9478 ioa_cfg->log_level = ipr_log_level;
9479 ioa_cfg->doorbell = IPR_DOORBELL;
/* Eye-catcher strings written into the struct so they are findable in a
 * raw memory/adapter dump. */
9480 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9481 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9482 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9483 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9484 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9485 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9487 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9488 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9489 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9490 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9491 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9492 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9493 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9494 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9495 ioa_cfg->sdt_state = INACTIVE;
9497 ipr_initialize_bus_attr(ioa_cfg);
9498 ioa_cfg->max_devs_supported = ipr_max_devs;
/* SIS-64 adapters support more targets/LUNs and use the 64-bit config
 * table layout; clamp the ipr_max_devs module parameter to each limit. */
9500 if (ioa_cfg->sis64) {
9501 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9502 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9503 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9504 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9505 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9506 + ((sizeof(struct ipr_config_table_entry64)
9507 * ioa_cfg->max_devs_supported)));
/* (an `} else {` sits here in the full source) */
9509 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9510 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9511 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9512 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9513 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9514 + ((sizeof(struct ipr_config_table_entry)
9515 * ioa_cfg->max_devs_supported)));
9518 host->max_channel = IPR_VSET_BUS;
9519 host->unique_id = host->host_no;
9520 host->max_cmd_len = IPR_MAX_CDB_LEN;
9521 host->can_queue = ioa_cfg->max_cmds;
9522 pci_set_drvdata(pdev, ioa_cfg);
/* Each HRRQ gets free/pending lists and its own spinlock; which lock
 * pointer is used presumably depends on the HRRQ count — the selecting
 * condition between these two assignments is elided here, TODO confirm. */
9524 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9525 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9526 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9527 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9529 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9531 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9536 * ipr_get_chip_info - Find adapter chip information
9537 * @dev_id: PCI device id struct
9540 * ptr to chip information on success / NULL on failure
/* Linear scan of the static ipr_chip[] table for a vendor/device match.
 * NOTE(review): the trailing `return NULL;` is elided in this copy. */
9542 static const struct ipr_chip_t *
9543 ipr_get_chip_info(const struct pci_device_id *dev_id)
9547 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9548 if (ipr_chip[i].vendor == dev_id->vendor &&
9549 ipr_chip[i].device == dev_id->device)
9550 return &ipr_chip[i];
9555 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9557 * @ioa_cfg: ioa config struct
/* If the PCI channel is offline (EEH in progress), sleep on eeh_wait_q —
 * woken by the error handlers — up to IPR_PCI_ERROR_RECOVERY_TIMEOUT, then
 * restore the device's saved PCI config space. No-op when online. */
9562 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9564 struct pci_dev *pdev = ioa_cfg->pdev;
9566 if (pci_channel_offline(pdev)) {
9567 wait_event_timeout(ioa_cfg->eeh_wait_q,
9568 !pci_channel_offline(pdev),
9569 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9570 pci_restore_state(pdev);
/* Try to enable between 1 and ipr_number_of_msix MSI-X vectors; on success
 * record each vector's IRQ number and the count in ioa_cfg. NOTE(review):
 * the failure branch (negative return from pci_enable_msix_range) and the
 * final `return 0;` are elided in this copy — the ipr_wait_for_pci_err_recovery
 * call at 9585 belongs to that error path; verify against upstream. */
9574 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9576 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9579 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9580 entries[i].entry = i;
9582 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9583 entries, 1, ipr_number_of_msix);
9585 ipr_wait_for_pci_err_recovery(ioa_cfg);
9589 for (i = 0; i < vectors; i++)
9590 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9591 ioa_cfg->nvectors = vectors;
/* MSI counterpart of ipr_enable_msix(): with plain MSI the vectors are the
 * consecutive IRQs starting at pdev->irq. NOTE(review): error branch and
 * return statements are elided in this copy, as in ipr_enable_msix(). */
9596 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9600 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9602 ipr_wait_for_pci_err_recovery(ioa_cfg);
9606 for (i = 0; i < vectors; i++)
9607 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9608 ioa_cfg->nvectors = vectors;
/* Build a "host<N>-<vector>" name for each enabled vector, used as the
 * /proc/interrupts label when the IRQ is requested. snprintf is bounded
 * to the desc buffer size minus one, guaranteeing NUL termination. */
9613 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9615 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9617 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9618 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9619 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
/* Redundantly re-terminate at the current string length. */
9620 ioa_cfg->vectors_info[vec_idx].
9621 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
/* Request IRQs for vectors 1..nvectors-1 (vector 0 is requested by the
 * caller). NOTE(review): the handler/flags arguments to request_irq, the
 * failure-unwind loop body around the free_irq call, and the returns are
 * elided in this copy — on error the full source frees the already
 * requested vectors before returning rc. */
9625 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9629 for (i = 1; i < ioa_cfg->nvectors; i++) {
9630 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9633 ioa_cfg->vectors_info[i].desc,
9637 free_irq(ioa_cfg->vectors_info[i].vec,
9646 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9647 * @pdev: PCI device struct
9649 * Description: Simply set the msi_received flag to 1 indicating that
9650 * Message Signaled Interrupts are supported.
9653 * 0 on success / non-zero on failure
/* Interrupt handler used only during the MSI self-test: flag receipt and
 * wake the waiter in ipr_test_msi(). Always returns IRQ_HANDLED. */
9655 static irqreturn_t ipr_test_intr(int irq, void *devp)
9657 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9658 unsigned long lock_flags = 0;
9659 irqreturn_t rc = IRQ_HANDLED;
9661 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
/* host_lock serializes against the sleeping side in ipr_test_msi(). */
9662 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9664 ioa_cfg->msi_received = 1;
9665 wake_up(&ioa_cfg->msi_wait_q);
9667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9672 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9673 * @pdev: PCI device struct
9675 * Description: The return value from pci_enable_msi_range() can not always be
9676 * trusted. This routine sets up and initiates a test interrupt to determine
9677 * if the interrupt is received via the ipr_test_intr() service routine.
9678 * If the tests fails, the driver will fall back to LSI.
9681 * 0 on success / non-zero on failure
/* Sequence: mask interrupts, hook ipr_test_intr on the MSI/MSI-X (or legacy)
 * IRQ, poke the IO_DEBUG_ACKNOWLEDGE bit to make the adapter fire one
 * interrupt, then wait up to 1s (HZ) for msi_received to be set. */
9683 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9686 volatile u32 int_reg;
9687 unsigned long lock_flags = 0;
9691 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9692 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9693 ioa_cfg->msi_received = 0;
9694 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9695 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
/* Read back to flush the posted MMIO write before dropping the lock. */
9696 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9699 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9700 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9702 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
/* NOTE(review): the `if (rc)`/`else` framing around these messages is
 * elided in this copy. */
9704 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9706 } else if (ipr_debug)
9707 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
/* Trigger the self-test interrupt and wait for ipr_test_intr(). */
9709 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9710 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9711 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9712 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9713 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9715 if (!ioa_cfg->msi_received) {
9716 /* MSI test failed */
9717 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
/* The elided line here sets rc = -EOPNOTSUPP in the full source — TODO
 * confirm; ipr_probe_ioa() checks for exactly that value. */
9719 } else if (ipr_debug)
9720 dev_info(&pdev->dev, "MSI test succeeded.\n");
9722 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9724 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9725 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9727 free_irq(pdev->irq, ioa_cfg);
9734 /* ipr_probe_ioa - Allocates memory and does first stage of initialization
9735 * @pdev: PCI device struct
9736 * @dev_id: PCI device id struct
9739 * 0 on success / non-zero on failure
/* First-stage probe: allocate the Scsi_Host + ioa_cfg, identify the chip,
 * map the BAR, set DMA masks, pick an interrupt mode (MSI-X > MSI > LSI,
 * with an MSI self-test), allocate driver memory, request IRQs and add
 * the adapter to the global ipr_ioa_head list. NOTE(review): this copy is
 * elided throughout — error-branch `if` headers, `else` keywords, `goto`
 * targets/labels, closing braces and the final `return rc;` are missing;
 * the inline notes below flag the most confusing gaps. */
9741 static int ipr_probe_ioa(struct pci_dev *pdev,
9742 const struct pci_device_id *dev_id)
9744 struct ipr_ioa_cfg *ioa_cfg;
9745 struct Scsi_Host *host;
9746 unsigned long ipr_regs_pci;
9747 void __iomem *ipr_regs;
9748 int rc = PCIBIOS_SUCCESSFUL;
9749 volatile u32 mask, uproc, interrupts;
9750 unsigned long lock_flags, driver_lock_flags;
9754 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* ioa_cfg lives in the Scsi_Host's hostdata allocation. */
9755 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9758 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9763 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9764 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9765 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9767 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9769 if (!ioa_cfg->ipr_chip) {
9770 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9771 dev_id->vendor, dev_id->device);
9772 goto out_scsi_host_put;
9775 /* set SIS 32 or SIS 64 */
9776 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9777 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9778 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9779 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
/* Operational-timeout priority: module param > per-device long flag >
 * default. */
9781 if (ipr_transop_timeout)
9782 ioa_cfg->transop_timeout = ipr_transop_timeout;
9783 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9784 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9786 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9788 ioa_cfg->revid = pdev->revision;
9790 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9792 ipr_regs_pci = pci_resource_start(pdev, 0);
9794 rc = pci_request_regions(pdev, IPR_NAME);
9797 "Couldn't register memory range of registers\n");
9798 goto out_scsi_host_put;
/* Enable the device, retrying once after EEH recovery if the channel
 * is offline. */
9801 rc = pci_enable_device(pdev);
9803 if (rc || pci_channel_offline(pdev)) {
9804 if (pci_channel_offline(pdev)) {
9805 ipr_wait_for_pci_err_recovery(ioa_cfg);
9806 rc = pci_enable_device(pdev);
9810 dev_err(&pdev->dev, "Cannot enable adapter\n");
9811 ipr_wait_for_pci_err_recovery(ioa_cfg);
9812 goto out_release_regions;
9816 ipr_regs = pci_ioremap_bar(pdev, 0);
9820 "Couldn't map memory range of registers\n");
9825 ioa_cfg->hdw_dma_regs = ipr_regs;
9826 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9827 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9829 ipr_init_regs(ioa_cfg);
/* SIS-64 prefers a 64-bit DMA mask, falling back to 32-bit on failure;
 * SIS-32 uses 32-bit only. */
9831 if (ioa_cfg->sis64) {
9832 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9834 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9835 rc = dma_set_mask_and_coherent(&pdev->dev,
9839 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9842 dev_err(&pdev->dev, "Failed to set DMA mask\n");
9846 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9847 ioa_cfg->chip_cfg->cache_line_size);
9849 if (rc != PCIBIOS_SUCCESSFUL) {
9850 dev_err(&pdev->dev, "Write of cache line size failed\n");
9851 ipr_wait_for_pci_err_recovery(ioa_cfg);
9856 /* Issue MMIO read to ensure card is not in EEH */
9857 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9858 ipr_wait_for_pci_err_recovery(ioa_cfg);
/* Clamp the module parameter to the hardware vector limit. */
9860 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9861 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9862 IPR_MAX_MSIX_VECTORS);
9863 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
/* Interrupt-mode selection: MSI-X first, then MSI, else legacy LSI. */
9866 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9867 ipr_enable_msix(ioa_cfg) == 0)
9868 ioa_cfg->intr_flag = IPR_USE_MSIX;
9869 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9870 ipr_enable_msi(ioa_cfg) == 0)
9871 ioa_cfg->intr_flag = IPR_USE_MSI;
9873 ioa_cfg->intr_flag = IPR_USE_LSI;
9874 ioa_cfg->nvectors = 1;
9875 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9878 pci_set_master(pdev);
/* pci_set_master may have raced with EEH; retry once after recovery. */
9880 if (pci_channel_offline(pdev)) {
9881 ipr_wait_for_pci_err_recovery(ioa_cfg);
9882 pci_set_master(pdev);
9883 if (pci_channel_offline(pdev)) {
9885 goto out_msi_disable;
/* Verify MSI/MSI-X actually delivers; on -EOPNOTSUPP fall back to LSI. */
9889 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9890 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9891 rc = ipr_test_msi(ioa_cfg, pdev);
9892 if (rc == -EOPNOTSUPP) {
9893 ipr_wait_for_pci_err_recovery(ioa_cfg);
9894 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9895 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9896 pci_disable_msi(pdev);
9897 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9898 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9899 pci_disable_msix(pdev);
9902 ioa_cfg->intr_flag = IPR_USE_LSI;
9903 ioa_cfg->nvectors = 1;
/* NOTE(review): an `else if (rc)` header is elided before this goto. */
9906 goto out_msi_disable;
9908 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9909 dev_info(&pdev->dev,
9910 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9911 ioa_cfg->nvectors, pdev->irq);
9912 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9913 dev_info(&pdev->dev,
9914 "Request for %d MSIXs succeeded.",
/* One HRRQ per vector, capped by online CPUs and the driver max. */
9919 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9920 (unsigned int)num_online_cpus(),
9921 (unsigned int)IPR_MAX_HRRQ_NUM);
9923 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9924 goto out_msi_disable;
9926 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9927 goto out_msi_disable;
9929 rc = ipr_alloc_mem(ioa_cfg);
9932 "Couldn't allocate enough memory for device driver!\n");
9933 goto out_msi_disable;
9936 /* Save away PCI config space for use following IOA reset */
9937 rc = pci_save_state(pdev);
9939 if (rc != PCIBIOS_SUCCESSFUL) {
9940 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9946 * If HRRQ updated interrupt is not masked, or reset alert is set,
9947 * the card is in an unknown state and needs a hard reset
9949 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9950 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9951 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9952 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9953 ioa_cfg->needs_hard_reset = 1;
9954 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9955 ioa_cfg->needs_hard_reset = 1;
9956 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9957 ioa_cfg->ioa_unit_checked = 1;
9959 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9960 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9961 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* MSI/MSI-X path requests vector 0 then the rest; LSI path requests the
 * single legacy IRQ. Handler args/flags lines are partially elided. */
9963 if (ioa_cfg->intr_flag == IPR_USE_MSI
9964 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9965 name_msi_vectors(ioa_cfg);
9966 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9968 ioa_cfg->vectors_info[0].desc,
9971 rc = ipr_request_other_msi_irqs(ioa_cfg);
9973 rc = request_irq(pdev->irq, ipr_isr,
9975 IPR_NAME, &ioa_cfg->hrrq[0]);
9978 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* These devices need a PCI warm (slot) reset instead of BIST. */
9983 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9984 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9985 ioa_cfg->needs_warm_reset = 1;
9986 ioa_cfg->reset = ipr_reset_slot_reset;
9988 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9989 WQ_MEM_RECLAIM, host->host_no);
9991 if (!ioa_cfg->reset_work_q) {
9992 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
/* (an `} else` sits here in the full source) */
9996 ioa_cfg->reset = ipr_reset_start_bist;
/* Publish the adapter on the driver's global list. */
9998 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9999 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10000 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Error-unwind ladder (labels partially elided): free IRQs, free memory,
 * disable MSI/MSI-X, unmap, disable device, release regions, drop host. */
10007 ipr_free_irqs(ioa_cfg);
10009 ipr_free_mem(ioa_cfg);
10011 ipr_wait_for_pci_err_recovery(ioa_cfg);
10012 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10013 pci_disable_msi(pdev);
10014 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10015 pci_disable_msix(pdev);
10019 pci_disable_device(pdev);
10020 out_release_regions:
10021 pci_release_regions(pdev);
10023 scsi_host_put(host);
10028 * ipr_initiate_ioa_bringdown - Bring down an adapter
10029 * @ioa_cfg: ioa config struct
10030 * @shutdown_type: shutdown type
10032 * Description: This function will initiate bringing down the adapter.
10033 * This consists of issuing an IOA shutdown to the adapter
10034 * to flush the cache, and running BIST.
10035 * If the caller needs to wait on the completion of the reset,
10036 * the caller must sleep on the reset_wait_q.
/* Caller is expected to hold host_lock (both call sites in this file take
 * it) — presumably required; confirm against upstream locking comments. */
10041 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10042 enum ipr_shutdown_type shutdown_type)
/* Abort any dump that is still waiting so the reset isn't blocked on it. */
10045 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10046 ioa_cfg->sdt_state = ABORT_DUMP;
10047 ioa_cfg->reset_retries = 0;
10048 ioa_cfg->in_ioa_bringdown = 1;
10049 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10054 * __ipr_remove - Remove a single adapter
10055 * @pdev: pci device struct
10057 * Adapter hot plug remove entry point.
/* Common teardown: wait out any in-flight reset, mark every HRRQ as
 * removing (so no new commands are accepted), initiate a normal bringdown
 * and wait for it, flush work, unlink from the driver list, then free all
 * resources. NOTE(review): the reset_work_q destroy and scsi_host_put calls
 * after ipr_free_all_resources() are elided in this copy. */
10062 static void __ipr_remove(struct pci_dev *pdev)
10064 unsigned long host_lock_flags = 0;
10065 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10067 unsigned long driver_lock_flags;
10070 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Drop the lock while sleeping; re-check in_reset_reload after waking
 * since a new reset may have started. */
10071 while (ioa_cfg->in_reset_reload) {
10072 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10073 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10074 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10077 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10078 spin_lock(&ioa_cfg->hrrq[i]._lock);
10079 ioa_cfg->hrrq[i].removing_ioa = 1;
10080 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10083 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10085 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10086 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10087 flush_work(&ioa_cfg->work_q);
10088 if (ioa_cfg->reset_work_q)
10089 flush_workqueue(ioa_cfg->reset_work_q);
10090 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10091 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10093 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10094 list_del(&ioa_cfg->queue);
10095 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10097 if (ioa_cfg->sdt_state == ABORT_DUMP)
10098 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10099 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10101 ipr_free_all_resources(ioa_cfg);
10107 * ipr_remove - IOA hot plug remove entry point
10108 * @pdev: pci device struct
10110 * Adapter hot plug remove entry point.
/* PCI .remove callback: tear down the sysfs trace/dump files and the SCSI
 * host registration, then delegate the real teardown to __ipr_remove(). */
10115 static void ipr_remove(struct pci_dev *pdev)
10117 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10121 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10123 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10125 scsi_remove_host(ioa_cfg->host);
10127 __ipr_remove(pdev);
10133 * ipr_probe - Adapter hot plug add entry point
10136 * 0 on success / non-zero on failure
/* PCI .probe callback: first-stage init (ipr_probe_ioa), second-stage
 * (ipr_probe_ioa_part2), register the SCSI host, create trace/dump sysfs
 * files, scan, and enable per-HRRQ blk-iopoll on SIS-64 multi-vector
 * adapters. Each failure unwinds everything set up before it. */
10138 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10140 struct ipr_ioa_cfg *ioa_cfg;
10143 rc = ipr_probe_ioa(pdev, dev_id);
10148 ioa_cfg = pci_get_drvdata(pdev);
10149 rc = ipr_probe_ioa_part2(ioa_cfg);
10152 __ipr_remove(pdev);
10156 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10159 __ipr_remove(pdev);
10163 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10167 scsi_remove_host(ioa_cfg->host);
10168 __ipr_remove(pdev);
10172 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10176 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10178 scsi_remove_host(ioa_cfg->host);
10179 __ipr_remove(pdev);
10183 scsi_scan_host(ioa_cfg->host);
10184 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
/* HRRQ 0 is serviced by the regular ISR; polls start at index 1. */
10186 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10187 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10188 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10189 ioa_cfg->iopoll_weight, ipr_iopoll);
10190 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10194 schedule_work(&ioa_cfg->work_q);
10199 * ipr_shutdown - Shutdown handler.
10200 * @pdev: pci device struct
10202 * This function is invoked upon system shutdown/reboot. It will issue
10203 * an adapter shutdown to the adapter to flush the write cache.
10208 static void ipr_shutdown(struct pci_dev *pdev)
10210 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10211 unsigned long lock_flags = 0;
10212 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Stop iopoll first so no completions race the bringdown. */
10216 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10217 ioa_cfg->iopoll_weight = 0;
10218 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10219 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
/* Wait out any in-flight reset before starting the shutdown. */
10222 while (ioa_cfg->in_reset_reload) {
10223 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10224 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10225 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Fast reboot on SIS-64 uses the lighter QUIESCE shutdown. */
10228 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10229 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10231 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10233 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10234 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10235 ipr_free_irqs(ioa_cfg);
10236 pci_disable_device(ioa_cfg->pdev);
/* PCI ID table: every entry matches on vendor/device plus the IBM
 * subsystem vendor and a specific subsystem device ID; the final field
 * (driver_data) carries IPR_USE_* behavior flags read by ipr_probe_ioa().
 * NOTE(review): the terminating `{ }` sentinel entry and closing `};` are
 * elided in this copy. */
10240 static struct pci_device_id ipr_pci_table[] = {
10241 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10242 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10243 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10244 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10245 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10246 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10247 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10248 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10249 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10250 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10251 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10252 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10253 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10254 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10255 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10256 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10257 IPR_USE_LONG_TRANSOP_TIMEOUT },
10258 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10259 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10260 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10261 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10262 IPR_USE_LONG_TRANSOP_TIMEOUT },
10263 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10264 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10265 IPR_USE_LONG_TRANSOP_TIMEOUT },
10266 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10267 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10268 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10269 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10270 IPR_USE_LONG_TRANSOP_TIMEOUT},
10271 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10272 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10273 IPR_USE_LONG_TRANSOP_TIMEOUT },
10274 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10275 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10276 IPR_USE_LONG_TRANSOP_TIMEOUT },
10277 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10278 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10279 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10280 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10281 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10282 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10283 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10284 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10285 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10286 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10287 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10288 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10289 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10290 IPR_USE_LONG_TRANSOP_TIMEOUT },
10291 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10292 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10293 IPR_USE_LONG_TRANSOP_TIMEOUT },
10294 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10295 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10296 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10297 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10298 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10299 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10300 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10301 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10302 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10303 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10304 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10305 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10306 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10307 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10308 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10309 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10310 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10311 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10312 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10313 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10314 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10315 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10316 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10317 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10318 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10319 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10320 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10321 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10322 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10323 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10324 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10325 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10326 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10327 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10328 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10329 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10330 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10331 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10332 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10333 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10334 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10335 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10336 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10337 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10338 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10339 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10340 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10341 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10342 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10343 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10344 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10345 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10348 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI/EEH error-recovery callbacks wired into ipr_driver below.
 * NOTE(review): the closing `};` (and any .resume member) is elided in
 * this copy. */
10350 static const struct pci_error_handlers ipr_err_handler = {
10351 .error_detected = ipr_pci_error_detected,
10352 .mmio_enabled = ipr_pci_mmio_enabled,
10353 .slot_reset = ipr_pci_slot_reset,
/* PCI driver descriptor registered in ipr_init(). NOTE(review): the .name
 * initializer line and closing `};` are elided in this copy. */
10356 static struct pci_driver ipr_driver = {
10358 .id_table = ipr_pci_table,
10359 .probe = ipr_probe,
10360 .remove = ipr_remove,
10361 .shutdown = ipr_shutdown,
10362 .err_handler = &ipr_err_handler,
10366 * ipr_halt_done - Shutdown prepare completion
/* Completion callback for the shutdown-prepare command issued by
 * ipr_halt(): just return the command block to its HRRQ free list. */
10371 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10373 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10377 * ipr_halt - Issue shutdown prepare to all adapters
10380 * NOTIFY_OK on success / NOTIFY_DONE on failure
/* Reboot-notifier callback: on restart/halt/power-off, send an
 * IPR_IOA_SHUTDOWN (prepare-for-normal) command to every registered
 * adapter so write caches are flushed. Adapters that cannot accept
 * commands, or SIS-64 adapters under ipr_fast_reboot restart (already
 * handled by ipr_shutdown), are skipped. */
10382 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10384 struct ipr_cmnd *ipr_cmd;
10385 struct ipr_ioa_cfg *ioa_cfg;
10386 unsigned long flags = 0, driver_lock_flags;
10388 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10389 return NOTIFY_DONE;
10391 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10393 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10394 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10395 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10396 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10397 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
/* (a `continue;` follows here in the full source) */
10401 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10402 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10403 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10404 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10405 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10407 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10410 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
/* Reboot notifier registered in ipr_init(); NOTE(review): the
 * `.notifier_call = ipr_halt,` member and closing `};` are elided here. */
10415 static struct notifier_block ipr_notifier = {
10420 * ipr_init - Module entry point
10423 * 0 on success / negative value on failure
/* Register the reboot notifier (cache flush on shutdown) and the PCI
 * driver; the PCI core then calls ipr_probe() per matching adapter. */
10425 static int __init ipr_init(void)
10427 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10428 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10430 register_reboot_notifier(&ipr_notifier);
10431 return pci_register_driver(&ipr_driver);
10435 * ipr_exit - Module unload
10437 * Module unload entry point.
/* Mirror of ipr_init(): unhook the reboot notifier, then unregister the
 * PCI driver (which invokes ipr_remove() for every bound adapter). */
10442 static void __exit ipr_exit(void)
10444 unregister_reboot_notifier(&ipr_notifier);
10445 pci_unregister_driver(&ipr_driver);
/* Standard module entry/exit registration. */
10448 module_init(ipr_init);
10449 module_exit(ipr_exit);