/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

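/*
 * Usage note (illustrative, not part of the original source): both module
 * parameters above can be set at load time or, because of the S_IWUSR mode
 * bit, toggled afterwards through sysfs, e.g.:
 *
 *   modprobe hpsa hpsa_simple_mode=1 hpsa_allow_any=1
 *   echo 1 > /sys/module/hpsa/parameters/hpsa_allow_any
 */
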
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

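/*
 * Illustrative sketch (not driver code): the board_id values keying the
 * products[] table above pack the PCI subsystem device ID into the upper
 * 16 bits and the subsystem vendor ID into the lower 16 bits, so
 * subdevice 0x3241 from subvendor 0x103C becomes 0x3241103C.
 */
#if 0
static u32 example_board_id(u16 subsys_device, u16 subsys_vendor)
{
	return ((u32)subsys_device << 16) | subsys_vendor;
}
#endif
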
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
		 * external target (array) devices.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
		    "cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

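/*
 * Sketch (illustrative, not driver code): check_for_unit_attention() above
 * reads fixed-format SCSI sense data, where byte 2 carries the sense key
 * (here compared against UNIT_ATTENTION) and byte 12 carries the additional
 * sense code identifying the specific condition.
 */
#if 0
static inline u8 example_sense_key(const u8 *sense) { return sense[2] & 0x0f; }
static inline u8 example_asc(const u8 *sense) { return sense[12]; }
#endif
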
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

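/*
 * Usage note (illustrative): the store handler above is reached through the
 * Scsi_Host sysfs attribute; any nonzero value enables ioaccel ("HP SSD
 * Smart Path") submission and zero disables it, e.g. for host0:
 *
 *   echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 *   echo 0 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 */
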
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

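/*
 * Usage note (illustrative): raid_offload_debug takes a non-negative debug
 * level, and writing anything at all to the rescan attribute kicks off
 * hpsa_scan_start(); the written value is ignored:
 *
 *   echo 2 > /sys/class/scsi_host/host0/raid_offload_debug
 *   echo 1 > /sys/class/scsi_host/host0/rescan
 */
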
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

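/*
 * Note (an assumption based on CISS LUN addressing conventions, not stated
 * in this file): the top two bits of byte 3 of the 8-byte address select
 * the address mode, and 0x40 (mode bits 01) marks a logical-volume
 * address, which is what is_logical_dev_addr_mode() above tests for.
 */
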
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

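/*
 * Sketch of the reply-ring convention used by next_command() above
 * (illustrative, not driver code): the controller writes each completion
 * with a toggle bit in bit 0 that flips on every pass through the ring, so
 * an entry is fresh exactly when its low bit equals the consumer's current
 * wraparound value; no head/tail pointer handshake with the hardware is
 * required.
 */
#if 0
static bool example_entry_is_fresh(u32 entry, u8 wraparound)
{
	return (entry & 1) == wraparound;
}
#endif
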
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

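/*
 * Illustrative decode of the busaddr tag bits documented above (a sketch,
 * not driver code): bit 0 flags performant submission, bits 1-3 index the
 * block fetch table, and bits 4-6 carry the command type (zero for normal
 * performant mode, nonzero for ioaccel1).
 */
#if 0
static void example_decode_tag(u32 busaddr)
{
	u32 performant  = busaddr & 0x1;	/* bit 0 */
	u32 fetch_entry = (busaddr >> 1) & 0x7;	/* bits 1-3 */
	u32 cmd_type    = (busaddr >> 4) & 0x7;	/* bits 4-6 */
}
#endif
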
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

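/*
 * Note (illustrative): is_firmware_flash_cmd() above matches BMIC
 * pass-through CDBs by opcode, with byte 0 holding BMIC_WRITE and byte 6
 * the BMIC subcommand; BMIC_FLASH_FIRMWARE identifies a firmware download,
 * which is why the lockup-detection code below treats it specially.
 */
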
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c);
		h->access.submit_command(h, c);
	}
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit number; it is zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* ensure raid map updated prior to ->offload_enabled */
	}
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
			new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

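/*
 * Note (illustrative): SCSI3ADDR_EQ above unrolls an 8-byte address
 * comparison, equivalent to memcmp(a, b, 8) == 0, e.g.:
 *
 *   if (SCSI3ADDR_EQ(dev1->scsi3addr, dev2->scsi3addr))
 *           ...
 */
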
1057 static void fixup_botched_add(struct ctlr_info *h,
1058         struct hpsa_scsi_dev_t *added)
1059 {
1060         /* called when scsi_add_device fails in order to re-adjust
1061          * h->dev[] to match the mid layer's view.
1062          */
1063         unsigned long flags;
1064         int i, j;
1065
1066         spin_lock_irqsave(&h->lock, flags);
1067         for (i = 0; i < h->ndevices; i++) {
1068                 if (h->dev[i] == added) {
1069                         for (j = i; j < h->ndevices-1; j++)
1070                                 h->dev[j] = h->dev[j+1];
1071                         h->ndevices--;
1072                         break;
1073                 }
1074         }
1075         spin_unlock_irqrestore(&h->lock, flags);
1076         kfree(added);
1077 }
1078
1079 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1080         struct hpsa_scsi_dev_t *dev2)
1081 {
1082         /* we compare everything except lun and target as these
1083          * are not yet assigned.  Compare parts likely
1084          * to differ first
1085          */
1086         if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1087                 sizeof(dev1->scsi3addr)) != 0)
1088                 return 0;
1089         if (memcmp(dev1->device_id, dev2->device_id,
1090                 sizeof(dev1->device_id)) != 0)
1091                 return 0;
1092         if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1093                 return 0;
1094         if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1095                 return 0;
1096         if (dev1->devtype != dev2->devtype)
1097                 return 0;
1098         if (dev1->bus != dev2->bus)
1099                 return 0;
1100         return 1;
1101 }
1102
1103 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1104         struct hpsa_scsi_dev_t *dev2)
1105 {
1106         /* Device attributes that can change, but don't mean
1107          * that the device is a different device, nor that the OS
1108          * needs to be told anything about the change.
1109          */
1110         if (dev1->raid_level != dev2->raid_level)
1111                 return 1;
1112         if (dev1->offload_config != dev2->offload_config)
1113                 return 1;
1114         if (dev1->offload_enabled != dev2->offload_enabled)
1115                 return 1;
1116         if (dev1->queue_depth != dev2->queue_depth)
1117                 return 1;
1118         return 0;
1119 }
1120
1121 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1122  * and return needle location in *index.  If scsi3addr matches, but not
1123  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1124  * location in *index.
1125  * In the case of a minor device attribute change, such as RAID level, just
1126  * return DEVICE_UPDATED, along with the updated device's location in index.
1127  * If needle not found, return DEVICE_NOT_FOUND.
1128  */
1129 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1130         struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1131         int *index)
1132 {
1133         int i;
1134 #define DEVICE_NOT_FOUND 0
1135 #define DEVICE_CHANGED 1
1136 #define DEVICE_SAME 2
1137 #define DEVICE_UPDATED 3
1138         for (i = 0; i < haystack_size; i++) {
1139                 if (haystack[i] == NULL) /* previously removed. */
1140                         continue;
1141                 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1142                         *index = i;
1143                         if (device_is_the_same(needle, haystack[i])) {
1144                                 if (device_updated(needle, haystack[i]))
1145                                         return DEVICE_UPDATED;
1146                                 return DEVICE_SAME;
1147                         } else {
1148                                 /* Keep offline devices offline */
1149                                 if (needle->volume_offline)
1150                                         return DEVICE_NOT_FOUND;
1151                                 return DEVICE_CHANGED;
1152                         }
1153                 }
1154         }
1155         *index = -1;
1156         return DEVICE_NOT_FOUND;
1157 }
1158
1159 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1160                                         unsigned char scsi3addr[])
1161 {
1162         struct offline_device_entry *device;
1163         unsigned long flags;
1164
1165         /* Check to see if device is already on the list */
1166         spin_lock_irqsave(&h->offline_device_lock, flags);
1167         list_for_each_entry(device, &h->offline_device_list, offline_list) {
1168                 if (memcmp(device->scsi3addr, scsi3addr,
1169                         sizeof(device->scsi3addr)) == 0) {
1170                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1171                         return;
1172                 }
1173         }
1174         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1175
1176         /* Device is not on the list, add it. */
1177         device = kmalloc(sizeof(*device), GFP_KERNEL);
1178         if (!device) {
1179                 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1180                 return;
1181         }
1182         memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1183         spin_lock_irqsave(&h->offline_device_lock, flags);
1184         list_add_tail(&device->offline_list, &h->offline_device_list);
1185         spin_unlock_irqrestore(&h->offline_device_lock, flags);
1186 }
1187
1188 /* Print a message explaining various offline volume states */
1189 static void hpsa_show_volume_status(struct ctlr_info *h,
1190         struct hpsa_scsi_dev_t *sd)
1191 {
1192         if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1193                 dev_info(&h->pdev->dev,
1194                         "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1195                         h->scsi_host->host_no,
1196                         sd->bus, sd->target, sd->lun);
1197         switch (sd->volume_offline) {
1198         case HPSA_LV_OK:
1199                 break;
1200         case HPSA_LV_UNDERGOING_ERASE:
1201                 dev_info(&h->pdev->dev,
1202                         "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1203                         h->scsi_host->host_no,
1204                         sd->bus, sd->target, sd->lun);
1205                 break;
1206         case HPSA_LV_UNDERGOING_RPI:
1207                 dev_info(&h->pdev->dev,
1208                         "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1209                         h->scsi_host->host_no,
1210                         sd->bus, sd->target, sd->lun);
1211                 break;
1212         case HPSA_LV_PENDING_RPI:
1213                 dev_info(&h->pdev->dev,
1214                                 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1215                                 h->scsi_host->host_no,
1216                                 sd->bus, sd->target, sd->lun);
1217                 break;
1218         case HPSA_LV_ENCRYPTED_NO_KEY:
1219                 dev_info(&h->pdev->dev,
1220                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1221                         h->scsi_host->host_no,
1222                         sd->bus, sd->target, sd->lun);
1223                 break;
1224         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1225                 dev_info(&h->pdev->dev,
1226                         "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1227                         h->scsi_host->host_no,
1228                         sd->bus, sd->target, sd->lun);
1229                 break;
1230         case HPSA_LV_UNDERGOING_ENCRYPTION:
1231                 dev_info(&h->pdev->dev,
1232                         "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1233                         h->scsi_host->host_no,
1234                         sd->bus, sd->target, sd->lun);
1235                 break;
1236         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1237                 dev_info(&h->pdev->dev,
1238                         "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1239                         h->scsi_host->host_no,
1240                         sd->bus, sd->target, sd->lun);
1241                 break;
1242         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1243                 dev_info(&h->pdev->dev,
1244                         "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1245                         h->scsi_host->host_no,
1246                         sd->bus, sd->target, sd->lun);
1247                 break;
1248         case HPSA_LV_PENDING_ENCRYPTION:
1249                 dev_info(&h->pdev->dev,
1250                         "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1251                         h->scsi_host->host_no,
1252                         sd->bus, sd->target, sd->lun);
1253                 break;
1254         case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1255                 dev_info(&h->pdev->dev,
1256                         "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1257                         h->scsi_host->host_no,
1258                         sd->bus, sd->target, sd->lun);
1259                 break;
1260         }
1261 }
1262
1263 /*
1264  * Figure the list of physical drive pointers for a logical drive with
1265  * raid offload configured.
1266  */
1267 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1268                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1269                                 struct hpsa_scsi_dev_t *logical_drive)
1270 {
1271         struct raid_map_data *map = &logical_drive->raid_map;
1272         struct raid_map_disk_data *dd = &map->data[0];
1273         int i, j;
1274         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1275                                 le16_to_cpu(map->metadata_disks_per_row);
1276         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1277                                 le16_to_cpu(map->layout_map_count) *
1278                                 total_disks_per_row;
1279         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1280                                 total_disks_per_row;
1281         int qdepth;
1282
1283         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1284                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1285
1286         qdepth = 0;
1287         for (i = 0; i < nraid_map_entries; i++) {
1288                 logical_drive->phys_disk[i] = NULL;
1289                 if (!logical_drive->offload_config)
1290                         continue;
1291                 for (j = 0; j < ndevices; j++) {
1292                         if (dev[j]->devtype != TYPE_DISK)
1293                                 continue;
1294                         if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1295                                 continue;
1296                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1297                                 continue;
1298
1299                         logical_drive->phys_disk[i] = dev[j];
1300                         if (i < nphys_disk)
1301                                 qdepth = min(h->nr_cmds, qdepth +
1302                                     logical_drive->phys_disk[i]->queue_depth);
1303                         break;
1304                 }
1305
1306                 /*
1307                  * This can happen if a physical drive is removed and
1308                  * the logical drive is degraded.  In that case, the RAID
1309                  * map data will refer to a physical disk which isn't actually
1310                  * present.  offload_enabled should already be 0 in that
1311                  * case, but we turn it off here just in case.
1312                  */
1313                 if (!logical_drive->phys_disk[i]) {
1314                         logical_drive->offload_enabled = 0;
1315                         logical_drive->queue_depth = h->nr_cmds;
1316                 }
1317         }
1318         if (nraid_map_entries)
1319                 /*
1320                  * This is correct for reads, too high for full-stripe writes,
1321                  * and way too high for partial-stripe writes.
1322                  */
1323                 logical_drive->queue_depth = qdepth;
1324         else
1325                 logical_drive->queue_depth = h->nr_cmds;
1326 }
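
/*
 * Editor's sketch (illustrative only, not part of the driver): the RAID
 * map's data[] array is laid out as layout_map_count maps, each with
 * row_cnt rows, and each row holding data_disks_per_row followed by
 * metadata_disks_per_row entries, which is the layout the loop above
 * and hpsa_debug_map_buff() below both walk.  A hypothetical helper
 * computing the flat index of one (map, row, col) triple:
 */
static inline int hpsa_example_raid_map_index(struct raid_map_data *map,
                                int m, int row, int col)
{
        int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
                                le16_to_cpu(map->metadata_disks_per_row);

        return (m * le16_to_cpu(map->row_cnt) + row) *
                                total_disks_per_row + col;
}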
1327
1328 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1329                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1330 {
1331         int i;
1332
1333         for (i = 0; i < ndevices; i++) {
1334                 if (dev[i]->devtype != TYPE_DISK)
1335                         continue;
1336                 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1337                         continue;
1338                 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1339         }
1340 }
1341
1342 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1343         struct hpsa_scsi_dev_t *sd[], int nsds)
1344 {
1345         /* sd contains scsi3 addresses, devtypes, and inquiry
1346          * data.  This function takes what's in sd to be the current
1347          * reality and updates h->dev[] to reflect that reality.
1348          */
1349         int i, entry, device_change, changes = 0;
1350         struct hpsa_scsi_dev_t *csd;
1351         unsigned long flags;
1352         struct hpsa_scsi_dev_t **added, **removed;
1353         int nadded, nremoved;
1354         struct Scsi_Host *sh = NULL;
1355
1356         added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1357         removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1358
1359         if (!added || !removed) {
1360                 dev_warn(&h->pdev->dev,
1361                         "out of memory in adjust_hpsa_scsi_table\n");
1362                 goto free_and_out;
1363         }
1364
1365         spin_lock_irqsave(&h->devlock, flags);
1366
1367         /* find any devices in h->dev[] that are not in
1368          * sd[] and remove them from h->dev[], and for any
1369          * devices which have changed, remove the old device
1370          * info and add the new device info.
1371          * If minor device attributes change, just update
1372          * the existing device structure.
1373          */
1374         i = 0;
1375         nremoved = 0;
1376         nadded = 0;
1377         while (i < h->ndevices) {
1378                 csd = h->dev[i];
1379                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1380                 if (device_change == DEVICE_NOT_FOUND) {
1381                         changes++;
1382                         hpsa_scsi_remove_entry(h, hostno, i,
1383                                 removed, &nremoved);
1384                         continue; /* remove ^^^, hence i not incremented */
1385                 } else if (device_change == DEVICE_CHANGED) {
1386                         changes++;
1387                         hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1388                                 added, &nadded, removed, &nremoved);
1389                         /* Set it to NULL to prevent it from being freed
1390                          * at the bottom of hpsa_update_scsi_devices()
1391                          */
1392                         sd[entry] = NULL;
1393                 } else if (device_change == DEVICE_UPDATED) {
1394                         hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
1395                 }
1396                 i++;
1397         }
1398
1399         /* Now, make sure every device listed in sd[] is also
1400          * listed in h->dev[], adding them if they aren't found
1401          */
1402
1403         for (i = 0; i < nsds; i++) {
1404                 if (!sd[i]) /* if already added above. */
1405                         continue;
1406
1407                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1408                  * as the SCSI mid-layer does not handle such devices well.
1409                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1410                  * at 160Hz, and prevents the system from coming up.
1411                  */
1412                 if (sd[i]->volume_offline) {
1413                         hpsa_show_volume_status(h, sd[i]);
1414                         dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
1415                                 h->scsi_host->host_no,
1416                                 sd[i]->bus, sd[i]->target, sd[i]->lun);
1417                         continue;
1418                 }
1419
1420                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1421                                         h->ndevices, &entry);
1422                 if (device_change == DEVICE_NOT_FOUND) {
1423                         changes++;
1424                         if (hpsa_scsi_add_entry(h, hostno, sd[i],
1425                                 added, &nadded) != 0)
1426                                 break;
1427                         sd[i] = NULL; /* prevent from being freed later. */
1428                 } else if (device_change == DEVICE_CHANGED) {
1429                         /* should never happen... */
1430                         changes++;
1431                         dev_warn(&h->pdev->dev,
1432                                 "device unexpectedly changed.\n");
1433                         /* but if it does happen, we just ignore that device */
1434                 }
1435         }
1436         spin_unlock_irqrestore(&h->devlock, flags);
1437
1438         /* Monitor devices which are in one of several NOT READY states to be
1439          * brought online later. This must be done without holding h->devlock,
1440          * so don't touch h->dev[]
1441          */
1442         for (i = 0; i < nsds; i++) {
1443                 if (!sd[i]) /* if already added above. */
1444                         continue;
1445                 if (sd[i]->volume_offline)
1446                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1447         }
1448
1449         /* Don't notify the scsi mid layer of any changes the first time
1450          * through (or if there are no changes); scsi_scan_host will do it
1451          * later, the first time through.
1452          */
1453         if (hostno == -1 || !changes)
1454                 goto free_and_out;
1455
1456         sh = h->scsi_host;
1457         /* Notify scsi mid layer of any removed devices */
1458         for (i = 0; i < nremoved; i++) {
1459                 struct scsi_device *sdev =
1460                         scsi_device_lookup(sh, removed[i]->bus,
1461                                 removed[i]->target, removed[i]->lun);
1462                 if (sdev != NULL) {
1463                         scsi_remove_device(sdev);
1464                         scsi_device_put(sdev);
1465                 } else {
1466                         /* We don't expect to get here.
1467                          * Future cmds to this device will get a selection
1468                          * timeout as if the device were gone.
1469                          */
1470                         dev_warn(&h->pdev->dev,
1471                                 "didn't find c%db%dt%dl%d for removal.\n",
1472                                 hostno, removed[i]->bus, removed[i]->target, removed[i]->lun);
1473                 }
1474                 kfree(removed[i]);
1475                 removed[i] = NULL;
1476         }
1477
1478         /* Notify scsi mid layer of any added devices */
1479         for (i = 0; i < nadded; i++) {
1480                 if (scsi_add_device(sh, added[i]->bus,
1481                         added[i]->target, added[i]->lun) == 0)
1482                         continue;
1483                 dev_warn(&h->pdev->dev,
1484                         "scsi_add_device c%db%dt%dl%d failed, device not added.\n",
1485                         hostno, added[i]->bus, added[i]->target, added[i]->lun);
1486                 /* now we have to remove it from h->dev,
1487                  * since it didn't get added to scsi mid layer
1488                  */
1489                 fixup_botched_add(h, added[i]);
1490         }
1491
1492 free_and_out:
1493         kfree(added);
1494         kfree(removed);
1495 }
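
/*
 * Editor's sketch (hypothetical helper, not driver code): the removal
 * pass above depends on the "don't advance i after removing slot i"
 * idiom, because hpsa_scsi_remove_entry() compacts h->dev[] so slot i
 * immediately holds the next entry.  The same idiom in a minimal
 * standalone form:
 */
static int hpsa_example_remove_if(int *tbl, int n, int (*dead)(int))
{
        int i = 0;

        while (i < n) {
                if (dead(tbl[i])) {
                        memmove(&tbl[i], &tbl[i + 1],
                                (n - i - 1) * sizeof(tbl[0]));
                        n--;
                        continue; /* slot i now holds the next entry */
                }
                i++;
        }
        return n;       /* new element count */
}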
1496
1497 /*
1498  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1499  * Assumes h->devlock is held.
1500  */
1501 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1502         int bus, int target, int lun)
1503 {
1504         int i;
1505         struct hpsa_scsi_dev_t *sd;
1506
1507         for (i = 0; i < h->ndevices; i++) {
1508                 sd = h->dev[i];
1509                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1510                         return sd;
1511         }
1512         return NULL;
1513 }
1514
1515 /* link sdev->hostdata to our per-device structure. */
1516 static int hpsa_slave_alloc(struct scsi_device *sdev)
1517 {
1518         struct hpsa_scsi_dev_t *sd;
1519         unsigned long flags;
1520         struct ctlr_info *h;
1521
1522         h = sdev_to_hba(sdev);
1523         spin_lock_irqsave(&h->devlock, flags);
1524         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1525                 sdev_id(sdev), sdev->lun);
1526         if (sd != NULL) {
1527                 sdev->hostdata = sd;
1528                 if (sd->queue_depth)
1529                         scsi_change_queue_depth(sdev, sd->queue_depth);
1530                 atomic_set(&sd->ioaccel_cmds_out, 0);
1531         }
1532         spin_unlock_irqrestore(&h->devlock, flags);
1533         return 0;
1534 }
1535
1536 static void hpsa_slave_destroy(struct scsi_device *sdev)
1537 {
1538         /* nothing to do. */
1539 }
1540
1541 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1542 {
1543         int i;
1544
1545         if (!h->cmd_sg_list)
1546                 return;
1547         for (i = 0; i < h->nr_cmds; i++) {
1548                 kfree(h->cmd_sg_list[i]);
1549                 h->cmd_sg_list[i] = NULL;
1550         }
1551         kfree(h->cmd_sg_list);
1552         h->cmd_sg_list = NULL;
1553 }
1554
1555 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1556 {
1557         int i;
1558
1559         if (h->chainsize <= 0)
1560                 return 0;
1561
1562         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1563                                 GFP_KERNEL);
1564         if (!h->cmd_sg_list) {
1565                 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1566                 return -ENOMEM;
1567         }
1568         for (i = 0; i < h->nr_cmds; i++) {
1569                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1570                                                 h->chainsize, GFP_KERNEL);
1571                 if (!h->cmd_sg_list[i]) {
1572                         dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1573                         goto clean;
1574                 }
1575         }
1576         return 0;
1577
1578 clean:
1579         hpsa_free_sg_chain_blocks(h);
1580         return -ENOMEM;
1581 }
1582
1583 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1584         struct CommandList *c)
1585 {
1586         struct SGDescriptor *chain_sg, *chain_block;
1587         u64 temp64;
1588         u32 chain_len;
1589
1590         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1591         chain_block = h->cmd_sg_list[c->cmdindex];
1592         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1593         chain_len = sizeof(*chain_sg) *
1594                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1595         chain_sg->Len = cpu_to_le32(chain_len);
1596         temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1597                                 PCI_DMA_TODEVICE);
1598         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1599                 /* prevent subsequent unmapping */
1600                 chain_sg->Addr = cpu_to_le64(0);
1601                 return -1;
1602         }
1603         chain_sg->Addr = cpu_to_le64(temp64);
1604         return 0;
1605 }
1606
1607 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1608         struct CommandList *c)
1609 {
1610         struct SGDescriptor *chain_sg;
1611
1612         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1613                 return;
1614
1615         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1616         pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1617                         le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1618 }
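
/*
 * Editor's sketch (hypothetical helper, not driver code): whether a
 * command carries a chained SG list follows directly from its header;
 * this is the same test hpsa_unmap_sg_chain_block() applies above
 * before undoing the extra mapping.
 */
static inline int hpsa_example_cmd_uses_sg_chain(struct ctlr_info *h,
                                struct CommandList *c)
{
        return le16_to_cpu(c->Header.SGTotal) > h->max_cmd_sg_entries;
}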
1619
1620
1621 /* Decode the various types of errors on ioaccel2 path.
1622  * Return 1 for any error that should generate a RAID path retry.
1623  * Return 0 for errors that don't require a RAID path retry.
1624  */
1625 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1626                                         struct CommandList *c,
1627                                         struct scsi_cmnd *cmd,
1628                                         struct io_accel2_cmd *c2)
1629 {
1630         int data_len;
1631         int retry = 0;
1632
1633         switch (c2->error_data.serv_response) {
1634         case IOACCEL2_SERV_RESPONSE_COMPLETE:
1635                 switch (c2->error_data.status) {
1636                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1637                         break;
1638                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1639                         dev_warn(&h->pdev->dev,
1640                                 "%s: task complete with check condition.\n",
1641                                 "HP SSD Smart Path");
1642                         cmd->result |= SAM_STAT_CHECK_CONDITION;
1643                         if (c2->error_data.data_present !=
1644                                         IOACCEL2_SENSE_DATA_PRESENT) {
1645                                 memset(cmd->sense_buffer, 0,
1646                                         SCSI_SENSE_BUFFERSIZE);
1647                                 break;
1648                         }
1649                         /* copy the sense data */
1650                         data_len = c2->error_data.sense_data_len;
1651                         if (data_len > SCSI_SENSE_BUFFERSIZE)
1652                                 data_len = SCSI_SENSE_BUFFERSIZE;
1653                         if (data_len > sizeof(c2->error_data.sense_data_buff))
1654                                 data_len =
1655                                         sizeof(c2->error_data.sense_data_buff);
1656                         memcpy(cmd->sense_buffer,
1657                                 c2->error_data.sense_data_buff, data_len);
1658                         retry = 1;
1659                         break;
1660                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1661                         dev_warn(&h->pdev->dev,
1662                                 "%s: task complete with BUSY status.\n",
1663                                 "HP SSD Smart Path");
1664                         retry = 1;
1665                         break;
1666                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1667                         dev_warn(&h->pdev->dev,
1668                                 "%s: task complete with reservation conflict.\n",
1669                                 "HP SSD Smart Path");
1670                         retry = 1;
1671                         break;
1672                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1673                         /* Make scsi midlayer do unlimited retries */
1674                         cmd->result = DID_IMM_RETRY << 16;
1675                         break;
1676                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1677                         dev_warn(&h->pdev->dev,
1678                                 "%s: task complete with aborted status.\n",
1679                                 "HP SSD Smart Path");
1680                         retry = 1;
1681                         break;
1682                 default:
1683                         dev_warn(&h->pdev->dev,
1684                                 "%s: task complete with unrecognized status: 0x%02x\n",
1685                                 "HP SSD Smart Path", c2->error_data.status);
1686                         retry = 1;
1687                         break;
1688                 }
1689                 break;
1690         case IOACCEL2_SERV_RESPONSE_FAILURE:
1691                 /* don't expect to get here. */
1692                 dev_warn(&h->pdev->dev,
1693                         "unexpected delivery or target failure, status = 0x%02x\n",
1694                         c2->error_data.status);
1695                 retry = 1;
1696                 break;
1697         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1698                 break;
1699         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1700                 break;
1701         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1702                 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1703                 retry = 1;
1704                 break;
1705         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1706                 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1707                 break;
1708         default:
1709                 dev_warn(&h->pdev->dev,
1710                         "%s: Unrecognized server response: 0x%02x\n",
1711                         "HP SSD Smart Path",
1712                         c2->error_data.serv_response);
1713                 retry = 1;
1714                 break;
1715         }
1716
1717         return retry;   /* retry on raid path? */
1718 }
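
/*
 * Editor's sketch (hypothetical helper, not driver code): the two
 * clamps in the sense-copy path above amount to taking the minimum of
 * three limits; with min_t that collapses to a single expression.
 */
static inline int hpsa_example_clamp_sense_len(struct io_accel2_cmd *c2)
{
        return min_t(size_t, c2->error_data.sense_data_len,
                        min_t(size_t, SCSI_SENSE_BUFFERSIZE,
                        sizeof(c2->error_data.sense_data_buff)));
}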
1719
1720 static void process_ioaccel2_completion(struct ctlr_info *h,
1721                 struct CommandList *c, struct scsi_cmnd *cmd,
1722                 struct hpsa_scsi_dev_t *dev)
1723 {
1724         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1725
1726         /* check for good status */
1727         if (likely(c2->error_data.serv_response == 0 &&
1728                         c2->error_data.status == 0)) {
1729                 cmd_free(h, c);
1730                 cmd->scsi_done(cmd);
1731                 return;
1732         }
1733
1734         /* Any RAID offload error results in retry which will use
1735          * the normal I/O path so the controller can handle whatever's
1736          * wrong.
1737          */
1738         if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1739                 c2->error_data.serv_response ==
1740                         IOACCEL2_SERV_RESPONSE_FAILURE) {
1741                 if (c2->error_data.status ==
1742                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1743                         dev->offload_enabled = 0;
1744                 goto retry_cmd;
1745         }
1746
1747         if (handle_ioaccel_mode2_error(h, c, cmd, c2))
1748                 goto retry_cmd;
1749
1750         cmd_free(h, c);
1751         cmd->scsi_done(cmd);
1752         return;
1753
1754 retry_cmd:
1755         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
1756         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
1757 }
1758
1759 static void complete_scsi_command(struct CommandList *cp)
1760 {
1761         struct scsi_cmnd *cmd;
1762         struct ctlr_info *h;
1763         struct ErrorInfo *ei;
1764         struct hpsa_scsi_dev_t *dev;
1765
1766         unsigned char sense_key;
1767         unsigned char asc;      /* additional sense code */
1768         unsigned char ascq;     /* additional sense code qualifier */
1769         unsigned long sense_data_size;
1770
1771         ei = cp->err_info;
1772         cmd = cp->scsi_cmd;
1773         h = cp->h;
1774         dev = cmd->device->hostdata;
1775
1776         scsi_dma_unmap(cmd); /* undo the DMA mappings */
1777         if ((cp->cmd_type == CMD_SCSI) &&
1778                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1779                 hpsa_unmap_sg_chain_block(h, cp);
1780
1781         cmd->result = (DID_OK << 16);           /* host byte */
1782         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1783
1784         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1785                 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1786
1787         if (cp->cmd_type == CMD_IOACCEL2)
1788                 return process_ioaccel2_completion(h, cp, cmd, dev);
1789
1790         cmd->result |= ei->ScsiStatus;
1791
1792         scsi_set_resid(cmd, ei->ResidualCnt);
1793         if (ei->CommandStatus == 0) {
                /* ioaccel_cmds_out was already decremented above */
1796                 cmd_free(h, cp);
1797                 cmd->scsi_done(cmd);
1798                 return;
1799         }
1800
1801         /* copy the sense data */
1802         if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1803                 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1804         else
1805                 sense_data_size = sizeof(ei->SenseInfo);
1806         if (ei->SenseLen < sense_data_size)
1807                 sense_data_size = ei->SenseLen;
1808
1809         memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1810
1811         /* For I/O accelerator commands, copy over some fields to the normal
1812          * CISS header used below for error handling.
1813          */
1814         if (cp->cmd_type == CMD_IOACCEL1) {
1815                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1816                 cp->Header.SGList = scsi_sg_count(cmd);
1817                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1818                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1819                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
1820                 cp->Header.tag = c->tag;
1821                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1822                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1823
1824                 /* Any RAID offload error results in retry which will use
1825                  * the normal I/O path so the controller can handle whatever's
1826                  * wrong.
1827                  */
1828                 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1829                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1830                                 dev->offload_enabled = 0;
1831                         INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1832                         queue_work_on(raw_smp_processor_id(),
1833                                         h->resubmit_wq, &cp->work);
1834                         return;
1835                 }
1836         }
1837
1838         /* an error has occurred */
1839         switch (ei->CommandStatus) {
1840
1841         case CMD_TARGET_STATUS:
1842                 if (ei->ScsiStatus) {
1843                         /* Get sense key */
1844                         sense_key = 0xf & ei->SenseInfo[2];
1845                         /* Get additional sense code */
1846                         asc = ei->SenseInfo[12];
1847                         /* Get additional sense code qualifier */
1848                         ascq = ei->SenseInfo[13];
1849                 }
1850                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1851                         if (sense_key == ABORTED_COMMAND) {
1852                                 cmd->result |= DID_SOFT_ERROR << 16;
1853                                 break;
1854                         }
1855                         break;
1856                 }
1857                 /* Problem was not a check condition
1858                  * Pass it up to the upper layers...
1859                  */
1860                 if (ei->ScsiStatus) {
1861                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1862                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1863                                 "Returning result: 0x%x\n",
1864                                 cp, ei->ScsiStatus,
1865                                 sense_key, asc, ascq,
1866                                 cmd->result);
1867                 } else {  /* scsi status is zero??? How??? */
1868                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1869                                 "Returning no connection.\n", cp);
1870
1871                         /* Ordinarily, this case should never happen,
1872                          * but there is a bug in some released firmware
1873                          * revisions that allows it to happen if, for
1874                          * example, a 4100 backplane loses power and
1875                          * the tape drive is in it.  We assume that
1876                          * it's a fatal error of some kind because we
1877                          * can't show that it wasn't. We will make it
1878                          * look like selection timeout since that is
1879                          * the most common reason for this to occur,
1880                          * and it's severe enough.
1881                          */
1882
1883                         cmd->result = DID_NO_CONNECT << 16;
1884                 }
1885                 break;
1886
1887         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1888                 break;
1889         case CMD_DATA_OVERRUN:
1890                 dev_warn(&h->pdev->dev,
1891                         "CDB %16phN data overrun\n", cp->Request.CDB);
1892                 break;
1893         case CMD_INVALID: {
1894                 /* print_bytes(cp, sizeof(*cp), 1, 0);
1895                 print_cmd(cp); */
1896                 /* We get CMD_INVALID if you address a non-existent device
1897                  * instead of a selection timeout (no response).  You will
1898                  * see this if you yank out a drive, then try to access it.
1899                  * This is kind of a shame because it means that any other
1900                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
1901                  * missing target. */
1902                 cmd->result = DID_NO_CONNECT << 16;
1903         }
1904                 break;
1905         case CMD_PROTOCOL_ERR:
1906                 cmd->result = DID_ERROR << 16;
1907                 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
1908                                 cp->Request.CDB);
1909                 break;
1910         case CMD_HARDWARE_ERR:
1911                 cmd->result = DID_ERROR << 16;
1912                 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
1913                         cp->Request.CDB);
1914                 break;
1915         case CMD_CONNECTION_LOST:
1916                 cmd->result = DID_ERROR << 16;
1917                 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
1918                         cp->Request.CDB);
1919                 break;
1920         case CMD_ABORTED:
1921                 cmd->result = DID_ABORT << 16;
1922                 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
1923                                 cp->Request.CDB, ei->ScsiStatus);
1924                 break;
1925         case CMD_ABORT_FAILED:
1926                 cmd->result = DID_ERROR << 16;
1927                 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
1928                         cp->Request.CDB);
1929                 break;
1930         case CMD_UNSOLICITED_ABORT:
1931                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1932                 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
1933                         cp->Request.CDB);
1934                 break;
1935         case CMD_TIMEOUT:
1936                 cmd->result = DID_TIME_OUT << 16;
1937                 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
1938                         cp->Request.CDB);
1939                 break;
1940         case CMD_UNABORTABLE:
1941                 cmd->result = DID_ERROR << 16;
1942                 dev_warn(&h->pdev->dev, "Command unabortable\n");
1943                 break;
1944         case CMD_IOACCEL_DISABLED:
1945                 /* This only handles the direct pass-through case since RAID
1946                  * offload is handled above.  Just attempt a retry.
1947                  */
1948                 cmd->result = DID_SOFT_ERROR << 16;
1949                 dev_warn(&h->pdev->dev,
1950                                 "cp %p had HP SSD Smart Path error\n", cp);
1951                 break;
1952         default:
1953                 cmd->result = DID_ERROR << 16;
1954                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1955                                 cp, ei->CommandStatus);
1956         }
1957         cmd_free(h, cp);
1958         cmd->scsi_done(cmd);
1959 }
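
/*
 * Editor's sketch (illustrative only, not driver code): cmd->result is
 * packed as separate bytes, which is why the code above ORs values in
 * at different shifts: host byte in bits 16-23, message byte in bits
 * 8-15, and the SCSI status byte in bits 0-7.
 */
static inline u32 hpsa_example_pack_result(u8 host_byte, u8 msg_byte,
                                u8 scsi_status)
{
        return (host_byte << 16) | (msg_byte << 8) | scsi_status;
}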
1960
1961 static void hpsa_pci_unmap(struct pci_dev *pdev,
1962         struct CommandList *c, int sg_used, int data_direction)
1963 {
1964         int i;
1965
1966         for (i = 0; i < sg_used; i++)
1967                 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1968                                 le32_to_cpu(c->SG[i].Len),
1969                                 data_direction);
1970 }
1971
1972 static int hpsa_map_one(struct pci_dev *pdev,
1973                 struct CommandList *cp,
1974                 unsigned char *buf,
1975                 size_t buflen,
1976                 int data_direction)
1977 {
1978         u64 addr64;
1979
1980         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1981                 cp->Header.SGList = 0;
1982                 cp->Header.SGTotal = cpu_to_le16(0);
1983                 return 0;
1984         }
1985
1986         addr64 = pci_map_single(pdev, buf, buflen, data_direction);
1987         if (dma_mapping_error(&pdev->dev, addr64)) {
1988                 /* Prevent subsequent unmap of something never mapped */
1989                 cp->Header.SGList = 0;
1990                 cp->Header.SGTotal = cpu_to_le16(0);
1991                 return -1;
1992         }
1993         cp->SG[0].Addr = cpu_to_le64(addr64);
1994         cp->SG[0].Len = cpu_to_le32(buflen);
1995         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1996         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
1997         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
1998         return 0;
1999 }
2000
2001 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2002         struct CommandList *c)
2003 {
2004         DECLARE_COMPLETION_ONSTACK(wait);
2005
2006         c->waiting = &wait;
2007         enqueue_cmd_and_start_io(h, c);
2008         wait_for_completion(&wait);
2009 }
2010
2011 static u32 lockup_detected(struct ctlr_info *h)
2012 {
2013         int cpu;
2014         u32 rc, *lockup_detected;
2015
2016         cpu = get_cpu();
2017         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2018         rc = *lockup_detected;
2019         put_cpu();
2020         return rc;
2021 }
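
/*
 * Editor's sketch (illustrative only, not driver code): reading a
 * single CPU's copy above is sufficient only if the detection path
 * writes the flag on every CPU; a setter under that assumption would
 * look like this (hypothetical helper):
 */
static inline void hpsa_example_set_lockup_detected(struct ctlr_info *h,
                                u32 value)
{
        int cpu;

        for_each_online_cpu(cpu)
                *per_cpu_ptr(h->lockup_detected, cpu) = value;
}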
2022
2023 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2024         struct CommandList *c)
2025 {
2026         /* If controller lockup detected, fake a hardware error. */
2027         if (unlikely(lockup_detected(h)))
2028                 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2029         else
2030                 hpsa_scsi_do_simple_cmd_core(h, c);
2031 }
2032
2033 #define MAX_DRIVER_CMD_RETRIES 25
2034 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2035         struct CommandList *c, int data_direction)
2036 {
2037         int backoff_time = 10, retry_count = 0;
2038
2039         do {
2040                 memset(c->err_info, 0, sizeof(*c->err_info));
2041                 hpsa_scsi_do_simple_cmd_core(h, c);
2042                 retry_count++;
2043                 if (retry_count > 3) {
2044                         msleep(backoff_time);
2045                         if (backoff_time < 1000)
2046                                 backoff_time *= 2;
2047                 }
2048         } while ((check_for_unit_attention(h, c) ||
2049                         check_for_busy(h, c)) &&
2050                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2051         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2052 }
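
/*
 * Editor's note (illustrative only, not driver code): the loop above
 * sleeps only from the fourth attempt on, doubling the delay while it
 * is still under a second: 10, 20, 40, ... 640, then 1280 ms for all
 * remaining retries.  The schedule as a standalone function:
 */
static inline int hpsa_example_backoff_ms(int retry_count)
{
        int ms = 10, i;

        if (retry_count <= 3)
                return 0;       /* first three retries do not sleep */
        for (i = 4; i < retry_count; i++)
                if (ms < 1000)
                        ms *= 2;
        return ms;
}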
2053
2054 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2055                                 struct CommandList *c)
2056 {
2057         const u8 *cdb = c->Request.CDB;
2058         const u8 *lun = c->Header.LUN.LunAddrBytes;
2059
2060         dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2061                 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2062                 txt, lun[0], lun[1], lun[2], lun[3],
2063                 lun[4], lun[5], lun[6], lun[7],
2064                 cdb[0], cdb[1], cdb[2], cdb[3],
2065                 cdb[4], cdb[5], cdb[6], cdb[7],
2066                 cdb[8], cdb[9], cdb[10], cdb[11],
2067                 cdb[12], cdb[13], cdb[14], cdb[15]);
2068 }
2069
2070 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2071                         struct CommandList *cp)
2072 {
2073         const struct ErrorInfo *ei = cp->err_info;
2074         struct device *d = &cp->h->pdev->dev;
2075         const u8 *sd = ei->SenseInfo;
2076
2077         switch (ei->CommandStatus) {
2078         case CMD_TARGET_STATUS:
2079                 hpsa_print_cmd(h, "SCSI status", cp);
2080                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2081                         dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2082                                 sd[2] & 0x0f, sd[12], sd[13]);
2083                 else
2084                         dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2085                 if (ei->ScsiStatus == 0)
2086                         dev_warn(d, "SCSI status is abnormally zero.  "
2087                                 "(probably indicates selection timeout "
2088                                 "reported incorrectly due to a known "
2089                                 "firmware bug, circa July, 2001.)\n");
2090                 break;
2091         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2092                 break;
2093         case CMD_DATA_OVERRUN:
2094                 hpsa_print_cmd(h, "overrun condition", cp);
2095                 break;
2096         case CMD_INVALID: {
2097                 /* controller unfortunately reports SCSI passthrus
2098                  * to non-existent targets as invalid commands.
2099                  */
2100                 hpsa_print_cmd(h, "invalid command", cp);
2101                 dev_warn(d, "probably means device no longer present\n");
2102                 }
2103                 break;
2104         case CMD_PROTOCOL_ERR:
2105                 hpsa_print_cmd(h, "protocol error", cp);
2106                 break;
2107         case CMD_HARDWARE_ERR:
2108                 hpsa_print_cmd(h, "hardware error", cp);
2109                 break;
2110         case CMD_CONNECTION_LOST:
2111                 hpsa_print_cmd(h, "connection lost", cp);
2112                 break;
2113         case CMD_ABORTED:
2114                 hpsa_print_cmd(h, "aborted", cp);
2115                 break;
2116         case CMD_ABORT_FAILED:
2117                 hpsa_print_cmd(h, "abort failed", cp);
2118                 break;
2119         case CMD_UNSOLICITED_ABORT:
2120                 hpsa_print_cmd(h, "unsolicited abort", cp);
2121                 break;
2122         case CMD_TIMEOUT:
2123                 hpsa_print_cmd(h, "timed out", cp);
2124                 break;
2125         case CMD_UNABORTABLE:
2126                 hpsa_print_cmd(h, "unabortable", cp);
2127                 break;
2128         default:
2129                 hpsa_print_cmd(h, "unknown status", cp);
2130                 dev_warn(d, "Unknown command status %x\n",
2131                                 ei->CommandStatus);
2132         }
2133 }
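
/*
 * Editor's sketch (hypothetical helper, not driver code): this
 * function, complete_scsi_command(), and hpsa_volume_offline() all
 * pull the key/ASC/ASCQ out of fixed-format sense data at the same
 * standard offsets:
 */
static inline void hpsa_example_decode_sense(const u8 *sense,
                u8 *sense_key, u8 *asc, u8 *ascq)
{
        *sense_key = sense[2] & 0x0f;   /* sense key, low nibble of byte 2 */
        *asc = sense[12];               /* additional sense code */
        *ascq = sense[13];              /* additional sense code qualifier */
}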
2134
2135 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2136                         u16 page, unsigned char *buf,
2137                         unsigned char bufsize)
2138 {
2139         int rc = IO_OK;
2140         struct CommandList *c;
2141         struct ErrorInfo *ei;
2142
2143         c = cmd_alloc(h);
2144
2145         if (c == NULL) {
2146                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2147                 return -ENOMEM;
2148         }
2149
2150         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2151                         page, scsi3addr, TYPE_CMD)) {
2152                 rc = -1;
2153                 goto out;
2154         }
2155         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2156         ei = c->err_info;
2157         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2158                 hpsa_scsi_interpret_error(h, c);
2159                 rc = -1;
2160         }
2161 out:
2162         cmd_free(h, c);
2163         return rc;
2164 }
2165
2166 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2167                 unsigned char *scsi3addr, unsigned char page,
2168                 struct bmic_controller_parameters *buf, size_t bufsize)
2169 {
2170         int rc = IO_OK;
2171         struct CommandList *c;
2172         struct ErrorInfo *ei;
2173
2174         c = cmd_alloc(h);
2175         if (c == NULL) {                        /* trouble... */
2176                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2177                 return -ENOMEM;
2178         }
2179
2180         if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2181                         page, scsi3addr, TYPE_CMD)) {
2182                 rc = -1;
2183                 goto out;
2184         }
2185         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2186         ei = c->err_info;
2187         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2188                 hpsa_scsi_interpret_error(h, c);
2189                 rc = -1;
2190         }
2191 out:
2192         cmd_free(h, c);
2193         return rc;
2194 }
2195
2196 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2197         u8 reset_type)
2198 {
2199         int rc = IO_OK;
2200         struct CommandList *c;
2201         struct ErrorInfo *ei;
2202
2203         c = cmd_alloc(h);
2204
2205         if (c == NULL) {                        /* trouble... */
2206                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2207                 return -ENOMEM;
2208         }
2209
2210         /* fill_cmd can't fail here, no data buffer to map. */
2211         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2212                         scsi3addr, TYPE_MSG);
2213         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2214         hpsa_scsi_do_simple_cmd_core(h, c);
2215         /* no unmap needed here because no data xfer. */
2216
2217         ei = c->err_info;
2218         if (ei->CommandStatus != 0) {
2219                 hpsa_scsi_interpret_error(h, c);
2220                 rc = -1;
2221         }
2222         cmd_free(h, c);
2223         return rc;
2224 }
2225
2226 static void hpsa_get_raid_level(struct ctlr_info *h,
2227         unsigned char *scsi3addr, unsigned char *raid_level)
2228 {
2229         int rc;
2230         unsigned char *buf;
2231
2232         *raid_level = RAID_UNKNOWN;
2233         buf = kzalloc(64, GFP_KERNEL);
2234         if (!buf)
2235                 return;
2236         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2237         if (rc == 0)
2238                 *raid_level = buf[8];
2239         if (*raid_level > RAID_UNKNOWN)
2240                 *raid_level = RAID_UNKNOWN;
2241         kfree(buf);
2243 }
2244
2245 #define HPSA_MAP_DEBUG
2246 #ifdef HPSA_MAP_DEBUG
2247 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2248                                 struct raid_map_data *map_buff)
2249 {
2250         struct raid_map_disk_data *dd = &map_buff->data[0];
2251         int map, row, col;
2252         u16 map_cnt, row_cnt, disks_per_row;
2253
2254         if (rc != 0)
2255                 return;
2256
2257         /* Show details only if debugging has been activated. */
2258         if (h->raid_offload_debug < 2)
2259                 return;
2260
2261         dev_info(&h->pdev->dev, "structure_size = %u\n",
2262                                 le32_to_cpu(map_buff->structure_size));
2263         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2264                         le32_to_cpu(map_buff->volume_blk_size));
2265         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2266                         le64_to_cpu(map_buff->volume_blk_cnt));
2267         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2268                         map_buff->phys_blk_shift);
2269         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2270                         map_buff->parity_rotation_shift);
2271         dev_info(&h->pdev->dev, "strip_size = %u\n",
2272                         le16_to_cpu(map_buff->strip_size));
2273         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2274                         le64_to_cpu(map_buff->disk_starting_blk));
2275         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2276                         le64_to_cpu(map_buff->disk_blk_cnt));
2277         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2278                         le16_to_cpu(map_buff->data_disks_per_row));
2279         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2280                         le16_to_cpu(map_buff->metadata_disks_per_row));
2281         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2282                         le16_to_cpu(map_buff->row_cnt));
2283         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2284                         le16_to_cpu(map_buff->layout_map_count));
2285         dev_info(&h->pdev->dev, "flags = 0x%x\n",
2286                         le16_to_cpu(map_buff->flags));
2287         dev_info(&h->pdev->dev, "encryption = %s\n",
2288                         le16_to_cpu(map_buff->flags) &
2289                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2290         dev_info(&h->pdev->dev, "dekindex = %u\n",
2291                         le16_to_cpu(map_buff->dekindex));
2292         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2293         for (map = 0; map < map_cnt; map++) {
2294                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2295                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2296                 for (row = 0; row < row_cnt; row++) {
2297                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2298                         disks_per_row =
2299                                 le16_to_cpu(map_buff->data_disks_per_row);
2300                         for (col = 0; col < disks_per_row; col++, dd++)
2301                                 dev_info(&h->pdev->dev,
2302                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2303                                         col, dd->ioaccel_handle,
2304                                         dd->xor_mult[0], dd->xor_mult[1]);
2305                         disks_per_row =
2306                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2307                         for (col = 0; col < disks_per_row; col++, dd++)
2308                                 dev_info(&h->pdev->dev,
2309                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2310                                         col, dd->ioaccel_handle,
2311                                         dd->xor_mult[0], dd->xor_mult[1]);
2312                 }
2313         }
2314 }
2315 #else
2316 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2317                         __attribute__((unused)) int rc,
2318                         __attribute__((unused)) struct raid_map_data *map_buff)
2319 {
2320 }
2321 #endif
2322
2323 static int hpsa_get_raid_map(struct ctlr_info *h,
2324         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2325 {
2326         int rc = 0;
2327         struct CommandList *c;
2328         struct ErrorInfo *ei;
2329
2330         c = cmd_alloc(h);
2331         if (c == NULL) {
2332                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2333                 return -ENOMEM;
2334         }
2335         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2336                         sizeof(this_device->raid_map), 0,
2337                         scsi3addr, TYPE_CMD)) {
2338                 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2339                 cmd_free(h, c);
2340                 return -ENOMEM;
2341         }
2342         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2343         ei = c->err_info;
2344         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2345                 hpsa_scsi_interpret_error(h, c);
2346                 cmd_free(h, c);
2347                 return -1;
2348         }
2349         cmd_free(h, c);
2350
2351         /* @todo in the future, dynamically allocate RAID map memory */
2352         if (le32_to_cpu(this_device->raid_map.structure_size) >
2353                                 sizeof(this_device->raid_map)) {
2354                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2355                 rc = -1;
2356         }
2357         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2358         return rc;
2359 }
2360
2361 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2362                 unsigned char scsi3addr[], u16 bmic_device_index,
2363                 struct bmic_identify_physical_device *buf, size_t bufsize)
2364 {
2365         int rc = IO_OK;
2366         struct CommandList *c;
2367         struct ErrorInfo *ei;
2368
2369         c = cmd_alloc(h);
        if (c == NULL) {                        /* trouble... */
                dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                return -ENOMEM;
        }
2370         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2371                 0, RAID_CTLR_LUNID, TYPE_CMD);
2372         if (rc)
2373                 goto out;
2374
2375         c->Request.CDB[2] = bmic_device_index & 0xff;
2376         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2377
2378         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2379         ei = c->err_info;
2380         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2381                 hpsa_scsi_interpret_error(h, c);
2382                 rc = -1;
2383         }
2384 out:
2385         cmd_free(h, c);
2386         return rc;
2387 }
2388
2389 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2390         unsigned char scsi3addr[], u8 page)
2391 {
2392         int rc;
2393         int i;
2394         int pages;
2395         unsigned char *buf, bufsize;
2396
2397         buf = kzalloc(256, GFP_KERNEL);
2398         if (!buf)
2399                 return 0;
2400
2401         /* Get the size of the page list first */
2402         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2403                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2404                                 buf, HPSA_VPD_HEADER_SZ);
2405         if (rc != 0)
2406                 goto exit_unsupported;
2407         pages = buf[3];
2408         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2409                 bufsize = pages + HPSA_VPD_HEADER_SZ;
2410         else
2411                 bufsize = 255;
2412
2413         /* Get the whole VPD page list */
2414         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2415                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2416                                 buf, bufsize);
2417         if (rc != 0)
2418                 goto exit_unsupported;
2419
2420         pages = buf[3];
2421         for (i = 1; i <= pages; i++)
2422                 if (buf[3 + i] == page)
2423                         goto exit_supported;
2424 exit_unsupported:
2425         kfree(buf);
2426         return 0;
2427 exit_supported:
2428         kfree(buf);
2429         return 1;
2430 }
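
/*
 * Editor's sketch (hypothetical wrapper, not driver code): callers of
 * the VPD routines pair the support check with the same two-step read
 * used above and in hpsa_get_volume_status(): fetch the header first
 * to learn the page length, then re-issue the inquiry for the whole
 * page.
 */
static int hpsa_example_read_vpd_page(struct ctlr_info *h,
                unsigned char scsi3addr[], u8 page, unsigned char *buf,
                int bufsize)
{
        int len;

        if (!hpsa_vpd_page_supported(h, scsi3addr, page))
                return -1;
        if (hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | page,
                        buf, HPSA_VPD_HEADER_SZ) != 0)
                return -1;
        len = buf[3] + HPSA_VPD_HEADER_SZ; /* page length from the header */
        if (len > bufsize)
                len = bufsize;
        if (hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | page, buf, len))
                return -1;
        return len;
}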
2431
2432 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2433         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2434 {
2435         int rc;
2436         unsigned char *buf;
2437         u8 ioaccel_status;
2438
2439         this_device->offload_config = 0;
2440         this_device->offload_enabled = 0;
2441
2442         buf = kzalloc(64, GFP_KERNEL);
2443         if (!buf)
2444                 return;
2445         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2446                 goto out;
2447         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2448                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2449         if (rc != 0)
2450                 goto out;
2451
2452 #define IOACCEL_STATUS_BYTE 4
2453 #define OFFLOAD_CONFIGURED_BIT 0x01
2454 #define OFFLOAD_ENABLED_BIT 0x02
2455         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2456         this_device->offload_config =
2457                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2458         if (this_device->offload_config) {
2459                 this_device->offload_enabled =
2460                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2461                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2462                         this_device->offload_enabled = 0;
2463         }
2464 out:
2465         kfree(buf);
2467 }
2468
2469 /* Get the device id from inquiry page 0x83 */
2470 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2471         unsigned char *device_id, int buflen)
2472 {
2473         int rc;
2474         unsigned char *buf;
2475
2476         if (buflen > 16)
2477                 buflen = 16;
2478         buf = kzalloc(64, GFP_KERNEL);
2479         if (!buf)
2480                 return -ENOMEM;
2481         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2482         if (rc == 0)
2483                 memcpy(device_id, &buf[8], buflen);
2484         kfree(buf);
2485         return rc != 0;
2486 }
2487
2488 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2489                 void *buf, int bufsize,
2490                 int extended_response)
2491 {
2492         int rc = IO_OK;
2493         struct CommandList *c;
2494         unsigned char scsi3addr[8];
2495         struct ErrorInfo *ei;
2496
2497         c = cmd_alloc(h);
2498         if (c == NULL) {                        /* trouble... */
2499                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2500                 return -1;
2501         }
2502         /* address the controller */
2503         memset(scsi3addr, 0, sizeof(scsi3addr));
2504         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2505                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2506                 rc = -1;
2507                 goto out;
2508         }
2509         if (extended_response)
2510                 c->Request.CDB[1] = extended_response;
2511         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2512         ei = c->err_info;
2513         if (ei->CommandStatus != 0 &&
2514             ei->CommandStatus != CMD_DATA_UNDERRUN) {
2515                 hpsa_scsi_interpret_error(h, c);
2516                 rc = -1;
2517         } else {
2518                 struct ReportLUNdata *rld = buf;
2519
2520                 if (rld->extended_response_flag != extended_response) {
2521                         dev_err(&h->pdev->dev,
2522                                 "report luns requested format %u, got %u\n",
2523                                 extended_response,
2524                                 rld->extended_response_flag);
2525                         rc = -1;
2526                 }
2527         }
2528 out:
2529         cmd_free(h, c);
2530         return rc;
2531 }
2532
2533 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2534                 struct ReportExtendedLUNdata *buf, int bufsize)
2535 {
2536         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2537                                                 HPSA_REPORT_PHYS_EXTENDED);
2538 }
2539
2540 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2541                 struct ReportLUNdata *buf, int bufsize)
2542 {
2543         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2544 }
2545
2546 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2547         int bus, int target, int lun)
2548 {
2549         device->bus = bus;
2550         device->target = target;
2551         device->lun = lun;
2552 }
2553
2554 /* Use VPD inquiry to get details of volume status */
2555 static int hpsa_get_volume_status(struct ctlr_info *h,
2556                                         unsigned char scsi3addr[])
2557 {
2558         int rc;
2559         int status;
2560         int size;
2561         unsigned char *buf;
2562
2563         buf = kzalloc(64, GFP_KERNEL);
2564         if (!buf)
2565                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2566
2567         /* Does controller have VPD for logical volume status? */
2568         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2569                 goto exit_failed;
2570
2571         /* Get the size of the VPD return buffer */
2572         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2573                                         buf, HPSA_VPD_HEADER_SZ);
2574         if (rc != 0)
2575                 goto exit_failed;
2576         size = buf[3];
2577
2578         /* Now get the whole VPD buffer */
2579         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2580                                         buf, size + HPSA_VPD_HEADER_SZ);
2581         if (rc != 0)
2582                 goto exit_failed;
2583         status = buf[4]; /* status byte */
2584
2585         kfree(buf);
2586         return status;
2587 exit_failed:
2588         kfree(buf);
2589         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2590 }
2591
2592 /* Determine offline status of a volume.
2593  * Return either:
2594  *  0 (not offline)
2595  *  0xff (offline for unknown reasons)
2596  *  # (integer code indicating one of several NOT READY states
2597  *     describing why a volume is to be kept offline)
2598  */
2599 static int hpsa_volume_offline(struct ctlr_info *h,
2600                                         unsigned char scsi3addr[])
2601 {
2602         struct CommandList *c;
2603         unsigned char *sense, sense_key, asc, ascq;
2604         int ldstat = 0;
2605         u16 cmd_status;
2606         u8 scsi_status;
2607 #define ASC_LUN_NOT_READY 0x04
2608 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2609 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2610
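             /*
              * The decoding below assumes fixed-format sense data: the
              * sense key is in byte 2 and the ASC/ASCQ pair in bytes 12
              * and 13 of the returned sense buffer.
              */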
2611         c = cmd_alloc(h);
2612         if (!c)
2613                 return 0;
2614         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2615         hpsa_scsi_do_simple_cmd_core(h, c);
2616         sense = c->err_info->SenseInfo;
2617         sense_key = sense[2];
2618         asc = sense[12];
2619         ascq = sense[13];
2620         cmd_status = c->err_info->CommandStatus;
2621         scsi_status = c->err_info->ScsiStatus;
2622         cmd_free(h, c);
2623         /* Is the volume 'not ready'? */
2624         if (cmd_status != CMD_TARGET_STATUS ||
2625                 scsi_status != SAM_STAT_CHECK_CONDITION ||
2626                 sense_key != NOT_READY ||
2627                 asc != ASC_LUN_NOT_READY)  {
2628                 return 0;
2629         }
2630
2631         /* Determine the reason for not ready state */
2632         ldstat = hpsa_get_volume_status(h, scsi3addr);
2633
2634         /* Keep volume offline in certain cases: */
2635         switch (ldstat) {
2636         case HPSA_LV_UNDERGOING_ERASE:
2637         case HPSA_LV_UNDERGOING_RPI:
2638         case HPSA_LV_PENDING_RPI:
2639         case HPSA_LV_ENCRYPTED_NO_KEY:
2640         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2641         case HPSA_LV_UNDERGOING_ENCRYPTION:
2642         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2643         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2644                 return ldstat;
2645         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2646                 /* If VPD status page isn't available,
2647                  * use ASC/ASCQ to determine state
2648                  */
2649                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2650                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2651                         return ldstat;
2652                 break;
2653         default:
2654                 break;
2655         }
2656         return 0;
2657 }
2658
2659 static int hpsa_update_device_info(struct ctlr_info *h,
2660         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2661         unsigned char *is_OBDR_device)
2662 {
2663
2664 #define OBDR_SIG_OFFSET 43
2665 #define OBDR_TAPE_SIG "$DR-10"
2666 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2667 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2668
2669         unsigned char *inq_buff;
2670         unsigned char *obdr_sig;
2671
2672         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2673         if (!inq_buff)
2674                 goto bail_out;
2675
2676         /* Do an inquiry to the device to see what it is. */
2677         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2678                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2679                 /* Inquiry failed (msg printed already) */
2680                 dev_err(&h->pdev->dev,
2681                         "hpsa_update_device_info: inquiry failed\n");
2682                 goto bail_out;
2683         }
2684
2685         this_device->devtype = (inq_buff[0] & 0x1f);
2686         memcpy(this_device->scsi3addr, scsi3addr, 8);
2687         memcpy(this_device->vendor, &inq_buff[8],
2688                 sizeof(this_device->vendor));
2689         memcpy(this_device->model, &inq_buff[16],
2690                 sizeof(this_device->model));
2691         memset(this_device->device_id, 0,
2692                 sizeof(this_device->device_id));
2693         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2694                 sizeof(this_device->device_id));
2695
2696         if (this_device->devtype == TYPE_DISK &&
2697                 is_logical_dev_addr_mode(scsi3addr)) {
2698                 int volume_offline;
2699
2700                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2701                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2702                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2703                 volume_offline = hpsa_volume_offline(h, scsi3addr);
2704                 if (volume_offline < 0 || volume_offline > 0xff)
2705                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2706                 this_device->volume_offline = volume_offline & 0xff;
2707         } else {
2708                 this_device->raid_level = RAID_UNKNOWN;
2709                 this_device->offload_config = 0;
2710                 this_device->offload_enabled = 0;
2711                 this_device->volume_offline = 0;
2712                 this_device->queue_depth = h->nr_cmds;
2713         }
2714
2715         if (is_OBDR_device) {
2716                 /* See if this is a One-Button-Disaster-Recovery device
2717                  * by looking for "$DR-10" at offset 43 in inquiry data.
2718                  */
2719                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2720                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2721                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
2722                                                 OBDR_SIG_LEN) == 0);
2723         }
2724
2725         kfree(inq_buff);
2726         return 0;
2727
2728 bail_out:
2729         kfree(inq_buff);
2730         return 1;
2731 }
2732
2733 static const char * const ext_target_model[] = {
2734         "MSA2012",
2735         "MSA2024",
2736         "MSA2312",
2737         "MSA2324",
2738         "P2000 G3 SAS",
2739         "MSA 2040 SAS",
2740         NULL,
2741 };
2742
2743 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2744 {
2745         int i;
2746
2747         for (i = 0; ext_target_model[i]; i++)
2748                 if (strncmp(device->model, ext_target_model[i],
2749                         strlen(ext_target_model[i])) == 0)
2750                         return 1;
2751         return 0;
2752 }
2753
2754 /* Helper function to assign bus, target, lun mapping of devices.
2755  * Puts non-external target logical volumes on bus 0, external target logical
2756  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2757  * Logical drive target and lun are assigned at this time, but
2758  * physical device lun and target assignment are deferred (assigned
2759  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2760  */
2761 static void figure_bus_target_lun(struct ctlr_info *h,
2762         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2763 {
2764         u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2765
2766         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2767                 /* physical device, target and lun filled in later */
2768                 if (is_hba_lunid(lunaddrbytes))
2769                         hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2770                 else
2771                         /* defer target, lun assignment for physical devices */
2772                         hpsa_set_bus_target_lun(device, 2, -1, -1);
2773                 return;
2774         }
2775         /* It's a logical device */
2776         if (is_ext_target(h, device)) {
2777         /* External target: put logicals on bus 1 and match the
2778          * target/lun numbers the box reports.  Other smart arrays:
2779          * bus 0, target 0, match lunid.
2780          */
2781                 hpsa_set_bus_target_lun(device,
2782                         1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2783                 return;
2784         }
2785         hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
2786 }
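     /*
      * Worked example of the mapping above (illustrative lunid): an
      * external-target logical lunid of 0x00058001 gives target
      * ((lunid >> 16) & 0x3fff) = 5 and lun (lunid & 0x00ff) = 1,
      * so the device lands at bus 1, target 5, lun 1.
      */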
2787
2788 /*
2789  * If there is no lun 0 on a target, linux won't find any devices.
2790  * For the external targets (arrays), we have to manually detect the enclosure
2791  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2792  * it for some reason.  *tmpdevice is the target we're adding,
2793  * this_device is a pointer into the current element of currentsd[]
2794  * that we're building up in update_scsi_devices(), below.
2795  * lunzerobits is a bitmap that tracks which targets already have a
2796  * lun 0 assigned.
2797  * Returns 1 if an enclosure was added, 0 if not.
2798  */
2799 static int add_ext_target_dev(struct ctlr_info *h,
2800         struct hpsa_scsi_dev_t *tmpdevice,
2801         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2802         unsigned long lunzerobits[], int *n_ext_target_devs)
2803 {
2804         unsigned char scsi3addr[8];
2805
2806         if (test_bit(tmpdevice->target, lunzerobits))
2807                 return 0; /* There is already a lun 0 on this target. */
2808
2809         if (!is_logical_dev_addr_mode(lunaddrbytes))
2810                 return 0; /* It's the logical targets that may lack lun 0. */
2811
2812         if (!is_ext_target(h, tmpdevice))
2813                 return 0; /* Only external target devices have this problem. */
2814
2815         if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2816                 return 0;
2817
2818         memset(scsi3addr, 0, 8);
2819         scsi3addr[3] = tmpdevice->target;
2820         if (is_hba_lunid(scsi3addr))
2821                 return 0; /* Don't add the RAID controller here. */
2822
2823         if (is_scsi_rev_5(h))
2824                 return 0; /* p1210m doesn't need to do this. */
2825
2826         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2827                 dev_warn(&h->pdev->dev,
2828                         "Maximum number of external target devices exceeded.  Check your hardware configuration.\n");
2830                 return 0;
2831         }
2832
2833         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2834                 return 0;
2835         (*n_ext_target_devs)++;
2836         hpsa_set_bus_target_lun(this_device,
2837                                 tmpdevice->bus, tmpdevice->target, 0);
2838         set_bit(tmpdevice->target, lunzerobits);
2839         return 1;
2840 }
2841
2842 /*
2843  * Get address of physical disk used for an ioaccel2 mode command:
2844  *      1. Extract ioaccel2 handle from the command.
2845  *      2. Find a matching ioaccel2 handle from list of physical disks.
2846  *      3. Return:
2847  *              1 and set scsi3addr to address of matching physical disk, or
2848  *              0 if no matching physical disk was found.
2849  */
2850 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2851         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2852 {
2853         struct ReportExtendedLUNdata *physicals = NULL;
2854         int responsesize = 24;  /* size of physical extended response */
2855         int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2856         u32 nphysicals = 0;     /* number of reported physical devs */
2857         int found = 0;          /* found match (1) or not (0) */
2858         u32 find;               /* handle we need to match */
2859         int i;
2860         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2861         struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2862         struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2863         __le32 it_nexus;        /* 4 byte device handle of the underlying device */
2864         __le32 scsi_nexus;      /* 4 byte device handle recorded in the ioaccel2 cmd */
2865
2866         if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2867                 return 0; /* no match */
2868
2869         /* point to the ioaccel2 device handle */
2870         c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2871         if (c2a == NULL)
2872                 return 0; /* no match */
2873
2874         scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2875         if (scmd == NULL)
2876                 return 0; /* no match */
2877
2878         d = scmd->device->hostdata;
2879         if (d == NULL)
2880                 return 0; /* no match */
2881
2882         it_nexus = cpu_to_le32(d->ioaccel_handle);
2883         scsi_nexus = c2a->scsi_nexus;
2884         find = le32_to_cpu(c2a->scsi_nexus);
2885
2886         if (h->raid_offload_debug > 0)
2887                 dev_info(&h->pdev->dev,
2888                         "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2889                         __func__, find,
2890                         d->device_id[0], d->device_id[1], d->device_id[2],
2891                         d->device_id[3], d->device_id[4], d->device_id[5],
2892                         d->device_id[6], d->device_id[7], d->device_id[8],
2893                         d->device_id[9], d->device_id[10], d->device_id[11],
2894                         d->device_id[12], d->device_id[13], d->device_id[14],
2895                         d->device_id[15]);
2896
2897         /* Get the list of physical devices */
2898         physicals = kzalloc(reportsize, GFP_KERNEL);
2899         if (physicals == NULL)
2900                 return 0;
2901         if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
2902                 dev_err(&h->pdev->dev,
2903                         "Can't lookup %s device handle: report physical LUNs failed.\n",
2904                         "HP SSD Smart Path");
2905                 kfree(physicals);
2906                 return 0;
2907         }
2908         nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2909                                                         responsesize;
2910
2911         /* find ioaccel2 handle in list of physicals: */
2912         for (i = 0; i < nphysicals; i++) {
2913                 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2914
2915                 /* handle is in bytes 28-31 of each lun */
2916                 if (entry->ioaccel_handle != find)
2917                         continue; /* didn't match */
2918                 found = 1;
2919                 memcpy(scsi3addr, entry->lunid, 8);
2920                 if (h->raid_offload_debug > 0)
2921                         dev_info(&h->pdev->dev,
2922                                 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2923                                 __func__, find,
2924                                 entry->ioaccel_handle, scsi3addr);
2925                 break; /* found it */
2926         }
2927
2928         kfree(physicals);
2929         return found;
2930 }
2931
2935 /*
2936  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
2937  * logdev.  The number of luns in physdev and logdev are returned in
2938  * *nphysicals and *nlogicals, respectively.
2939  * Returns 0 on success, -1 otherwise.
2940  */
2941 static int hpsa_gather_lun_info(struct ctlr_info *h,
2942         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
2943         struct ReportLUNdata *logdev, u32 *nlogicals)
2944 {
2945         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
2946                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2947                 return -1;
2948         }
2949         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
2950         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2951                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
2952                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
2953                 *nphysicals = HPSA_MAX_PHYS_LUN;
2954         }
2955         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
2956                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2957                 return -1;
2958         }
2959         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2960         /* Reject Logicals in excess of our max capability. */
2961         if (*nlogicals > HPSA_MAX_LUN) {
2962                 dev_warn(&h->pdev->dev,
2963                         "maximum logical LUNs (%d) exceeded.  "
2964                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
2965                         *nlogicals - HPSA_MAX_LUN);
2966                 *nlogicals = HPSA_MAX_LUN;
2967         }
2968         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2969                 dev_warn(&h->pdev->dev,
2970                         "maximum logical + physical LUNs (%d) exceeded. "
2971                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2972                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2973                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2974         }
2975         return 0;
2976 }
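     /*
      * Both report buffers above begin with an 8-byte header whose first
      * four bytes are the list length in bytes, big-endian; hence the
      * divides by the per-entry sizes.  Illustrative example: a
      * LUNListLength of 48 means two 24-byte extended physical entries,
      * or six 8-byte logical entries.
      */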
2977
2978 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2979         int i, int nphysicals, int nlogicals,
2980         struct ReportExtendedLUNdata *physdev_list,
2981         struct ReportLUNdata *logdev_list)
2982 {
2983         /* Helper function, figure out where the LUN ID info is coming from
2984          * given index i, lists of physical and logical devices, where in
2985          * the list the raid controller is supposed to appear (first or last)
2986          */
2987
2988         int logicals_start = nphysicals + (raid_ctlr_position == 0);
2989         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2990
2991         if (i == raid_ctlr_position)
2992                 return RAID_CTLR_LUNID;
2993
2994         if (i < logicals_start)
2995                 return &physdev_list->LUN[i -
2996                                 (raid_ctlr_position == 0)].lunid[0];
2997
2998         if (i < last_device)
2999                 return &logdev_list->LUN[i - nphysicals -
3000                         (raid_ctlr_position == 0)][0];
3001         BUG();
3002         return NULL;
3003 }
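     /*
      * Index layout handled above, illustrated for nphysicals = 2 and
      * nlogicals = 3:
      *   raid_ctlr_position == 0: [ctlr][phys0][phys1][log0][log1][log2]
      *   raid_ctlr_position  > 0: [phys0][phys1][log0][log1][log2][ctlr]
      */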
3004
3005 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3006 {
3007         int rc;
3008         int hba_mode_enabled;
3009         struct bmic_controller_parameters *ctlr_params;
3010
3011         ctlr_params = kzalloc(sizeof(*ctlr_params), GFP_KERNEL);
3012
3013         if (!ctlr_params)
3014                 return -ENOMEM;
3015         rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3016                 sizeof(struct bmic_controller_parameters));
3017         if (rc) {
3018                 kfree(ctlr_params);
3019                 return rc;
3020         }
3021
3022         hba_mode_enabled =
3023                 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3024         kfree(ctlr_params);
3025         return hba_mode_enabled;
3026 }
3027
3028 /* get physical drive ioaccel handle and queue depth */
3029 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3030                 struct hpsa_scsi_dev_t *dev,
3031                 u8 *lunaddrbytes,
3032                 struct bmic_identify_physical_device *id_phys)
3033 {
3034         int rc;
3035         struct ext_report_lun_entry *rle =
3036                 (struct ext_report_lun_entry *) lunaddrbytes;
3037
3038         dev->ioaccel_handle = rle->ioaccel_handle;
3039         memset(id_phys, 0, sizeof(*id_phys));
3040         rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3041                         GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3042                         sizeof(*id_phys));
3043 /* Reserve space for FW operations */
3044 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3045 #define DRIVE_QUEUE_DEPTH 7
3046         if (!rc)
3047                 dev->queue_depth =
3048                         le16_to_cpu(id_phys->current_queue_depth_limit) -
3049                                 DRIVE_CMDS_RESERVED_FOR_FW;
3050         else
3051                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3052         atomic_set(&dev->ioaccel_cmds_out, 0);
3053 }
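     /*
      * Queue depth example for the math above (illustrative value): a
      * drive reporting current_queue_depth_limit = 32 is driven at a
      * depth of 30, keeping DRIVE_CMDS_RESERVED_FOR_FW = 2 slots free
      * for firmware-initiated commands.
      */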
3054
3055 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3056 {
3057         /* the idea here is we could get notified
3058          * that some devices have changed, so we do a report
3059          * physical luns and report logical luns cmd, and adjust
3060          * our list of devices accordingly.
3061          *
3062          * The scsi3addr's of devices won't change so long as the
3063          * adapter is not reset.  That means we can rescan and
3064          * tell which devices we already know about, vs. new
3065          * devices, vs.  disappearing devices.
3066          */
3067         struct ReportExtendedLUNdata *physdev_list = NULL;
3068         struct ReportLUNdata *logdev_list = NULL;
3069         struct bmic_identify_physical_device *id_phys = NULL;
3070         u32 nphysicals = 0;
3071         u32 nlogicals = 0;
3072         u32 ndev_allocated = 0;
3073         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3074         int ncurrent = 0;
3075         int i, n_ext_target_devs, ndevs_to_allocate;
3076         int raid_ctlr_position;
3077         int rescan_hba_mode;
3078         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3079
3080         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3081         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3082         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3083         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3084         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3085
3086         if (!currentsd || !physdev_list || !logdev_list ||
3087                 !tmpdevice || !id_phys) {
3088                 dev_err(&h->pdev->dev, "out of memory\n");
3089                 goto out;
3090         }
3091         memset(lunzerobits, 0, sizeof(lunzerobits));
3092
3093         rescan_hba_mode = hpsa_hba_mode_enabled(h);
3094         if (rescan_hba_mode < 0)
3095                 goto out;
3096
3097         if (!h->hba_mode_enabled && rescan_hba_mode)
3098                 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3099         else if (h->hba_mode_enabled && !rescan_hba_mode)
3100                 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3101
3102         h->hba_mode_enabled = rescan_hba_mode;
3103
3104         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3105                         logdev_list, &nlogicals))
3106                 goto out;
3107
3108         /* We might see up to the maximum number of logical and physical disks
3109          * plus external target devices, and a device for the local RAID
3110          * controller.
3111          */
3112         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3113
3114         /* Allocate the per device structures */
3115         for (i = 0; i < ndevs_to_allocate; i++) {
3116                 if (i >= HPSA_MAX_DEVICES) {
3117                         dev_warn(&h->pdev->dev,
3118                                 "maximum devices (%d) exceeded.  %d devices ignored.\n",
3119                                 HPSA_MAX_DEVICES, ndevs_to_allocate - HPSA_MAX_DEVICES);
3120                         break;
3121                 }
3122
3123                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3124                 if (!currentsd[i]) {
3125                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3126                                 __FILE__, __LINE__);
3127                         goto out;
3128                 }
3129                 ndev_allocated++;
3130         }
3131
3132         if (is_scsi_rev_5(h))
3133                 raid_ctlr_position = 0;
3134         else
3135                 raid_ctlr_position = nphysicals + nlogicals;
3136
3137         /* adjust our table of devices */
3138         n_ext_target_devs = 0;
3139         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3140                 u8 *lunaddrbytes, is_OBDR = 0;
3141
3142                 /* Figure out where the LUN ID info is coming from */
3143                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3144                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3145                 /* skip masked physical devices. */
3146                 if (lunaddrbytes[3] & 0xC0 &&
3147                         i < nphysicals + (raid_ctlr_position == 0))
3148                         continue;
3149
3150                 /* Get device type, vendor, model, device id */
3151                 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3152                                                         &is_OBDR))
3153                         continue; /* skip it if we can't talk to it. */
3154                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3155                 this_device = currentsd[ncurrent];
3156
3157                 /*
3158                  * For external target devices, we have to insert a LUN 0 which
3159                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3160                  * is nonetheless an enclosure device there.  We have to
3161                  * present that otherwise linux won't find anything if
3162                  * there is no lun 0.
3163                  */
3164                 if (add_ext_target_dev(h, tmpdevice, this_device,
3165                                 lunaddrbytes, lunzerobits,
3166                                 &n_ext_target_devs)) {
3167                         ncurrent++;
3168                         this_device = currentsd[ncurrent];
3169                 }
3170
3171                 *this_device = *tmpdevice;
3172
3173                 switch (this_device->devtype) {
3174                 case TYPE_ROM:
3175                         /* We don't *really* support actual CD-ROM devices,
3176                          * just "One Button Disaster Recovery" tape drive
3177                          * which temporarily pretends to be a CD-ROM drive.
3178                          * So we check that the device is really an OBDR tape
3179                          * device by checking for "$DR-10" in bytes 43-48 of
3180                          * the inquiry data.
3181                          */
3182                         if (is_OBDR)
3183                                 ncurrent++;
3184                         break;
3185                 case TYPE_DISK:
3186                         if (h->hba_mode_enabled) {
3187                                 /* never use raid mapper in HBA mode */
3188                                 this_device->offload_enabled = 0;
3189                                 ncurrent++;
3190                                 break;
3191                         } else if (h->acciopath_status) {
3192                                 if (i >= nphysicals) {
3193                                         ncurrent++;
3194                                         break;
3195                                 }
3196                         } else {
3197                                 if (i < nphysicals)
3198                                         break;
3199                                 ncurrent++;
3200                                 break;
3201                         }
3202                         if (h->transMethod & CFGTBL_Trans_io_accel1 ||
3203                                 h->transMethod & CFGTBL_Trans_io_accel2) {
3204                                 hpsa_get_ioaccel_drive_info(h, this_device,
3205                                                         lunaddrbytes, id_phys);
3206                                 atomic_set(&this_device->ioaccel_cmds_out, 0);
3207                                 ncurrent++;
3208                         }
3209                         break;
3210                 case TYPE_TAPE:
3211                 case TYPE_MEDIUM_CHANGER:
3212                         ncurrent++;
3213                         break;
3214                 case TYPE_RAID:
3215                         /* Only present the Smartarray HBA as a RAID controller.
3216                          * If it's a RAID controller other than the HBA itself
3217                          * (an external RAID controller, MSA500 or similar)
3218                          * don't present it.
3219                          */
3220                         if (!is_hba_lunid(lunaddrbytes))
3221                                 break;
3222                         ncurrent++;
3223                         break;
3224                 default:
3225                         break;
3226                 }
3227                 if (ncurrent >= HPSA_MAX_DEVICES)
3228                         break;
3229         }
3230         hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
3231         adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3232 out:
3233         kfree(tmpdevice);
3234         for (i = 0; i < ndev_allocated; i++)
3235                 kfree(currentsd[i]);
3236         kfree(currentsd);
3237         kfree(physdev_list);
3238         kfree(logdev_list);
3239         kfree(id_phys);
3240 }
3241
3242 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3243                                    struct scatterlist *sg)
3244 {
3245         u64 addr64 = (u64) sg_dma_address(sg);
3246         unsigned int len = sg_dma_len(sg);
3247
3248         desc->Addr = cpu_to_le64(addr64);
3249         desc->Len = cpu_to_le32(len);
3250         desc->Ext = 0;
3251 }
3252
3253 /*
3254  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3255  * dma mapping  and fills in the scatter gather entries of the
3256  * hpsa command, cp.
3257  */
3258 static int hpsa_scatter_gather(struct ctlr_info *h,
3259                 struct CommandList *cp,
3260                 struct scsi_cmnd *cmd)
3261 {
3262         struct scatterlist *sg;
3263         int use_sg, i, sg_index, chained;
3264         struct SGDescriptor *curr_sg;
3265
3266         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3267
3268         use_sg = scsi_dma_map(cmd);
3269         if (use_sg < 0)
3270                 return use_sg;
3271
3272         if (!use_sg)
3273                 goto sglist_finished;
3274
3275         curr_sg = cp->SG;
3276         chained = 0;
3277         sg_index = 0;
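             /*
              * Walk the mapped segments.  If there are more entries than
              * fit in the command itself, spill everything from the last
              * in-command slot onward into this command's chain block;
              * hpsa_map_sg_chain_block() below turns the command's final
              * descriptor into a pointer to that block.
              */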
3278         scsi_for_each_sg(cmd, sg, use_sg, i) {
3279                 if (i == h->max_cmd_sg_entries - 1 &&
3280                         use_sg > h->max_cmd_sg_entries) {
3281                         chained = 1;
3282                         curr_sg = h->cmd_sg_list[cp->cmdindex];
3283                         sg_index = 0;
3284                 }
3285                 hpsa_set_sg_descriptor(curr_sg, sg);
3286                 curr_sg++;
3287         }
3288
3289         /* Back the pointer up to the last entry and mark it as "last". */
3290         (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3291
3292         if (use_sg + chained > h->maxSG)
3293                 h->maxSG = use_sg + chained;
3294
3295         if (chained) {
3296                 cp->Header.SGList = h->max_cmd_sg_entries;
3297                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3298                 if (hpsa_map_sg_chain_block(h, cp)) {
3299                         scsi_dma_unmap(cmd);
3300                         return -1;
3301                 }
3302                 return 0;
3303         }
3304
3305 sglist_finished:
3306
3307         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
3308         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3309         return 0;
3310 }
3311
3312 #define IO_ACCEL_INELIGIBLE (1)
3313 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3314 {
3315         int is_write = 0;
3316         u32 block;
3317         u32 block_cnt;
3318
3319         /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3320         switch (cdb[0]) {
3321         case WRITE_6:
3322         case WRITE_12:
3323                 is_write = 1;   /* fall through */
3324         case READ_6:
3325         case READ_12:
3326                 if (*cdb_len == 6) {
3327                         block = ((cdb[1] & 0x1F) << 16) |
3328                                 (((u32) cdb[2]) << 8) | cdb[3];
3329                         block_cnt = cdb[4];
3330                         if (block_cnt == 0)
3331                                 block_cnt = 256; /* 0 means 256 blocks in 6-byte CDBs */
3329                 } else {
3330                         BUG_ON(*cdb_len != 12);
3331                         block = (((u32) cdb[2]) << 24) |
3332                                 (((u32) cdb[3]) << 16) |
3333                                 (((u32) cdb[4]) << 8) |
3334                                 cdb[5];
3335                         block_cnt =
3336                                 (((u32) cdb[6]) << 24) |
3337                                 (((u32) cdb[7]) << 16) |
3338                                 (((u32) cdb[8]) << 8) |
3339                                 cdb[9];
3340                 }
3341                 if (block_cnt > 0xffff)
3342                         return IO_ACCEL_INELIGIBLE;
3343
3344                 cdb[0] = is_write ? WRITE_10 : READ_10;
3345                 cdb[1] = 0;
3346                 cdb[2] = (u8) (block >> 24);
3347                 cdb[3] = (u8) (block >> 16);
3348                 cdb[4] = (u8) (block >> 8);
3349                 cdb[5] = (u8) (block);
3350                 cdb[6] = 0;
3351                 cdb[7] = (u8) (block_cnt >> 8);
3352                 cdb[8] = (u8) (block_cnt);
3353                 cdb[9] = 0;
3354                 *cdb_len = 10;
3355                 break;
3356         }
3357         return 0;
3358 }
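     /*
      * Example of the 6- to 10-byte fixup above (illustrative bytes):
      *   READ_6  {0x08, 0x00, 0x12, 0x34, 0x08, 0x00}  (LBA 0x1234, 8 blocks)
      * becomes
      *   READ_10 {0x28, 0x00, 0x00, 0x00, 0x12, 0x34, 0x00, 0x00, 0x08, 0x00}
      */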
3359
3360 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3361         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3362         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3363 {
3364         struct scsi_cmnd *cmd = c->scsi_cmd;
3365         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3366         unsigned int len;
3367         unsigned int total_len = 0;
3368         struct scatterlist *sg;
3369         u64 addr64;
3370         int use_sg, i;
3371         struct SGDescriptor *curr_sg;
3372         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3373
3374         /* TODO: implement chaining support */
3375         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3376                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3377                 return IO_ACCEL_INELIGIBLE;
3378         }
3379
3380         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3381
3382         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3383                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3384                 return IO_ACCEL_INELIGIBLE;
3385         }
3386
3387         c->cmd_type = CMD_IOACCEL1;
3388
3389         /* Adjust the DMA address to point to the accelerated command buffer */
3390         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3391                                 (c->cmdindex * sizeof(*cp));
3392         BUG_ON(c->busaddr & 0x0000007F);
3393
3394         use_sg = scsi_dma_map(cmd);
3395         if (use_sg < 0) {
3396                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3397                 return use_sg;
3398         }
3399
3400         if (use_sg) {
3401                 curr_sg = cp->SG;
3402                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3403                         addr64 = (u64) sg_dma_address(sg);
3404                         len  = sg_dma_len(sg);
3405                         total_len += len;
3406                         curr_sg->Addr = cpu_to_le64(addr64);
3407                         curr_sg->Len = cpu_to_le32(len);
3408                         curr_sg->Ext = cpu_to_le32(0);
3409                         curr_sg++;
3410                 }
3411                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3412
3413                 switch (cmd->sc_data_direction) {
3414                 case DMA_TO_DEVICE:
3415                         control |= IOACCEL1_CONTROL_DATA_OUT;
3416                         break;
3417                 case DMA_FROM_DEVICE:
3418                         control |= IOACCEL1_CONTROL_DATA_IN;
3419                         break;
3420                 case DMA_NONE:
3421                         control |= IOACCEL1_CONTROL_NODATAXFER;
3422                         break;
3423                 default:
3424                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3425                         cmd->sc_data_direction);
3426                         BUG();
3427                         break;
3428                 }
3429         } else {
3430                 control |= IOACCEL1_CONTROL_NODATAXFER;
3431         }
3432
3433         c->Header.SGList = use_sg;
3434         /* Fill out the command structure to submit */
3435         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3436         cp->transfer_len = cpu_to_le32(total_len);
3437         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3438                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3439         cp->control = cpu_to_le32(control);
3440         memcpy(cp->CDB, cdb, cdb_len);
3441         memcpy(cp->CISS_LUN, scsi3addr, 8);
3442         /* Tag was already set at init time. */
3443         enqueue_cmd_and_start_io(h, c);
3444         return 0;
3445 }
3446
3447 /*
3448  * Queue a command directly to a device behind the controller using the
3449  * I/O accelerator path.
3450  */
3451 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3452         struct CommandList *c)
3453 {
3454         struct scsi_cmnd *cmd = c->scsi_cmd;
3455         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3456
3457         c->phys_disk = dev;
3458
3459         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3460                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3461 }
3462
3463 /*
3464  * Set encryption parameters for the ioaccel2 request
3465  */
3466 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3467         struct CommandList *c, struct io_accel2_cmd *cp)
3468 {
3469         struct scsi_cmnd *cmd = c->scsi_cmd;
3470         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3471         struct raid_map_data *map = &dev->raid_map;
3472         u64 first_block;
3473
3474         /* Are we doing encryption on this device */
3475         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3476                 return;
3477         /* Set the data encryption key index. */
3478         cp->dekindex = map->dekindex;
3479
3480         /* Set the encryption enable flag, encoded into direction field. */
3481         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3482
3483         /* Set encryption tweak values based on logical block address
3484          * If block size is 512, tweak value is LBA.
3485          * For other block sizes, tweak is (LBA * block size) / 512.
3486          */
3487         switch (cmd->cmnd[0]) {
3488         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3489         case WRITE_6:
3490         case READ_6:
3491                 first_block = get_unaligned_be16(&cmd->cmnd[2]);
3492                 break;
3493         case WRITE_10:
3494         case READ_10:
3495         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3496         case WRITE_12:
3497         case READ_12:
3498                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
3499                 break;
3500         case WRITE_16:
3501         case READ_16:
3502                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3503                 break;
3504         default:
3505                 dev_err(&h->pdev->dev,
3506                         "ERROR: %s: size (0x%x) not supported for encryption\n",
3507                         __func__, cmd->cmnd[0]);
3508                 BUG();
3509                 break;
3510         }
3511
3512         if (le32_to_cpu(map->volume_blk_size) != 512)
3513                 first_block = first_block *
3514                                 le32_to_cpu(map->volume_blk_size)/512;
3515
3516         cp->tweak_lower = cpu_to_le32(first_block);
3517         cp->tweak_upper = cpu_to_le32(first_block >> 32);
3518 }
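     /*
      * Tweak example for the scaling above (illustrative values): with a
      * 4096-byte volume block size, first_block 10 becomes
      * 10 * 4096 / 512 = 80, i.e. the tweak always counts 512-byte
      * sectors regardless of the volume block size.
      */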
3519
3520 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3521         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3522         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3523 {
3524         struct scsi_cmnd *cmd = c->scsi_cmd;
3525         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3526         struct ioaccel2_sg_element *curr_sg;
3527         int use_sg, i;
3528         struct scatterlist *sg;
3529         u64 addr64;
3530         u32 len;
3531         u32 total_len = 0;
3532
3533         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3534                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3535                 return IO_ACCEL_INELIGIBLE;
3536         }
3537
3538         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3539                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3540                 return IO_ACCEL_INELIGIBLE;
3541         }
3542
3543         c->cmd_type = CMD_IOACCEL2;
3544         /* Adjust the DMA address to point to the accelerated command buffer */
3545         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3546                                 (c->cmdindex * sizeof(*cp));
3547         BUG_ON(c->busaddr & 0x0000007F);
3548
3549         memset(cp, 0, sizeof(*cp));
3550         cp->IU_type = IOACCEL2_IU_TYPE;
3551
3552         use_sg = scsi_dma_map(cmd);
3553         if (use_sg < 0) {
3554                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3555                 return use_sg;
3556         }
3557
3558         if (use_sg) {
3559                 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3560                 curr_sg = cp->sg;
3561                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3562                         addr64 = (u64) sg_dma_address(sg);
3563                         len  = sg_dma_len(sg);
3564                         total_len += len;
3565                         curr_sg->address = cpu_to_le64(addr64);
3566                         curr_sg->length = cpu_to_le32(len);
3567                         curr_sg->reserved[0] = 0;
3568                         curr_sg->reserved[1] = 0;
3569                         curr_sg->reserved[2] = 0;
3570                         curr_sg->chain_indicator = 0;
3571                         curr_sg++;
3572                 }
3573
3574                 switch (cmd->sc_data_direction) {
3575                 case DMA_TO_DEVICE:
3576                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3577                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
3578                         break;
3579                 case DMA_FROM_DEVICE:
3580                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3581                         cp->direction |= IOACCEL2_DIR_DATA_IN;
3582                         break;
3583                 case DMA_NONE:
3584                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3585                         cp->direction |= IOACCEL2_DIR_NO_DATA;
3586                         break;
3587                 default:
3588                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3589                                 cmd->sc_data_direction);
3590                         BUG();
3591                         break;
3592                 }
3593         } else {
3594                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3595                 cp->direction |= IOACCEL2_DIR_NO_DATA;
3596         }
3597
3598         /* Set encryption parameters, if necessary */
3599         set_encrypt_ioaccel2(h, c, cp);
3600
3601         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3602         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3603         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3604
3605         /* fill in sg elements */
3606         cp->sg_count = (u8) use_sg;
3607
3608         cp->data_len = cpu_to_le32(total_len);
3609         cp->err_ptr = cpu_to_le64(c->busaddr +
3610                         offsetof(struct io_accel2_cmd, error_data));
3611         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3612
3613         enqueue_cmd_and_start_io(h, c);
3614         return 0;
3615 }
3616
3617 /*
3618  * Queue a command to the correct I/O accelerator path.
3619  */
3620 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3621         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3622         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3623 {
3624         /* Try to honor the device's queue depth */
3625         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
3626                                         phys_disk->queue_depth) {
3627                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3628                 return IO_ACCEL_INELIGIBLE;
3629         }
3630         if (h->transMethod & CFGTBL_Trans_io_accel1)
3631                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3632                                                 cdb, cdb_len, scsi3addr,
3633                                                 phys_disk);
3634         else
3635                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3636                                                 cdb, cdb_len, scsi3addr,
3637                                                 phys_disk);
3638 }
3639
3640 static void raid_map_helper(struct raid_map_data *map,
3641                 int offload_to_mirror, u32 *map_index, u32 *current_group)
3642 {
3643         if (offload_to_mirror == 0)  {
3644                 /* use physical disk in the first mirrored group. */
3645                 *map_index %= le16_to_cpu(map->data_disks_per_row);
3646                 return;
3647         }
3648         do {
3649                 /* determine mirror group that *map_index indicates */
3650                 *current_group = *map_index /
3651                         le16_to_cpu(map->data_disks_per_row);
3652                 if (offload_to_mirror == *current_group)
3653                         continue;
3654                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
3655                         /* select map index from next group */
3656                         *map_index += le16_to_cpu(map->data_disks_per_row);
3657                         (*current_group)++;
3658                 } else {
3659                         /* select map index from first group */
3660                         *map_index %= le16_to_cpu(map->data_disks_per_row);
3661                         *current_group = 0;
3662                 }
3663         } while (offload_to_mirror != *current_group);
3664 }
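     /*
      * Example (illustrative geometry): for an R1-ADM map with
      * layout_map_count = 3 and data_disks_per_row = 2, map indexes 0-1
      * are mirror group 0, 2-3 group 1, 4-5 group 2.  Starting from
      * map_index 1 with offload_to_mirror 2, the loop above advances the
      * index 1 -> 3 -> 5, stopping once it lands in group 2.
      */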
3665
3666 /*
3667  * Attempt to perform offload RAID mapping for a logical volume I/O.
3668  */
3669 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3670         struct CommandList *c)
3671 {
3672         struct scsi_cmnd *cmd = c->scsi_cmd;
3673         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3674         struct raid_map_data *map = &dev->raid_map;
3675         struct raid_map_disk_data *dd = &map->data[0];
3676         int is_write = 0;
3677         u32 map_index;
3678         u64 first_block, last_block;
3679         u32 block_cnt;
3680         u32 blocks_per_row;
3681         u64 first_row, last_row;
3682         u32 first_row_offset, last_row_offset;
3683         u32 first_column, last_column;
3684         u64 r0_first_row, r0_last_row;
3685         u32 r5or6_blocks_per_row;
3686         u64 r5or6_first_row, r5or6_last_row;
3687         u32 r5or6_first_row_offset, r5or6_last_row_offset;
3688         u32 r5or6_first_column, r5or6_last_column;
3689         u32 total_disks_per_row;
3690         u32 stripesize;
3691         u32 first_group, last_group, current_group;
3692         u32 map_row;
3693         u32 disk_handle;
3694         u64 disk_block;
3695         u32 disk_block_cnt;
3696         u8 cdb[16];
3697         u8 cdb_len;
3698         u16 strip_size;
3699 #if BITS_PER_LONG == 32
3700         u64 tmpdiv;
3701 #endif
3702         int offload_to_mirror;
3703
3704         /* check for valid opcode, get LBA and block count */
3705         switch (cmd->cmnd[0]) {
3706         case WRITE_6:
3707                 is_write = 1;   /* fall through */
3708         case READ_6:
3709                 first_block =
3710                         (((u64) (cmd->cmnd[1] & 0x1F)) << 16) |
3711                         (((u64) cmd->cmnd[2]) << 8) |
3712                         cmd->cmnd[3];
3712                 block_cnt = cmd->cmnd[4];
3713                 if (block_cnt == 0)
3714                         block_cnt = 256;
3715                 break;
3716         case WRITE_10:
3717                 is_write = 1;   /* fall through */
3718         case READ_10:
3719                 first_block =
3720                         (((u64) cmd->cmnd[2]) << 24) |
3721                         (((u64) cmd->cmnd[3]) << 16) |
3722                         (((u64) cmd->cmnd[4]) << 8) |
3723                         cmd->cmnd[5];
3724                 block_cnt =
3725                         (((u32) cmd->cmnd[7]) << 8) |
3726                         cmd->cmnd[8];
3727                 break;
3728         case WRITE_12:
3729                 is_write = 1;   /* fall through */
3730         case READ_12:
3731                 first_block =
3732                         (((u64) cmd->cmnd[2]) << 24) |
3733                         (((u64) cmd->cmnd[3]) << 16) |
3734                         (((u64) cmd->cmnd[4]) << 8) |
3735                         cmd->cmnd[5];
3736                 block_cnt =
3737                         (((u32) cmd->cmnd[6]) << 24) |
3738                         (((u32) cmd->cmnd[7]) << 16) |
3739                         (((u32) cmd->cmnd[8]) << 8) |
3740                 cmd->cmnd[9];
3741                 break;
3742         case WRITE_16:
3743                 is_write = 1;   /* fall through */
3744         case READ_16:
3745                 first_block =
3746                         (((u64) cmd->cmnd[2]) << 56) |
3747                         (((u64) cmd->cmnd[3]) << 48) |
3748                         (((u64) cmd->cmnd[4]) << 40) |
3749                         (((u64) cmd->cmnd[5]) << 32) |
3750                         (((u64) cmd->cmnd[6]) << 24) |
3751                         (((u64) cmd->cmnd[7]) << 16) |
3752                         (((u64) cmd->cmnd[8]) << 8) |
3753                         cmd->cmnd[9];
3754                 block_cnt =
3755                         (((u32) cmd->cmnd[10]) << 24) |
3756                         (((u32) cmd->cmnd[11]) << 16) |
3757                         (((u32) cmd->cmnd[12]) << 8) |
3758                         cmd->cmnd[13];
3759                 break;
3760         default:
3761                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3762         }
3763         last_block = first_block + block_cnt - 1;
3764
3765         /* check for write to non-RAID-0 */
3766         if (is_write && dev->raid_level != 0)
3767                 return IO_ACCEL_INELIGIBLE;
3768
3769         /* check for invalid block or wraparound */
3770         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
3771                 last_block < first_block)
3772                 return IO_ACCEL_INELIGIBLE;
3773
3774         /* calculate stripe information for the request */
3775         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
3776                                 le16_to_cpu(map->strip_size);
3777         strip_size = le16_to_cpu(map->strip_size);
3778 #if BITS_PER_LONG == 32
3779         tmpdiv = first_block;
3780         (void) do_div(tmpdiv, blocks_per_row);
3781         first_row = tmpdiv;
3782         tmpdiv = last_block;
3783         (void) do_div(tmpdiv, blocks_per_row);
3784         last_row = tmpdiv;
3785         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3786         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3787         tmpdiv = first_row_offset;
3788         (void) do_div(tmpdiv, strip_size);
3789         first_column = tmpdiv;
3790         tmpdiv = last_row_offset;
3791         (void) do_div(tmpdiv, strip_size);
3792         last_column = tmpdiv;
3793 #else
3794         first_row = first_block / blocks_per_row;
3795         last_row = last_block / blocks_per_row;
3796         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3797         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3798         first_column = first_row_offset / strip_size;
3799         last_column = last_row_offset / strip_size;
3800 #endif
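             /*
              * Worked example of the row/column math (illustrative
              * geometry): 3 data disks with 128-block strips give
              * blocks_per_row = 384, so first_block 500 lands in row 1
              * (500 / 384) at row offset 116 and column 0 (116 / 128).
              */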
3801
3802         /* if this isn't a single row/column then give to the controller */
3803         if ((first_row != last_row) || (first_column != last_column))
3804                 return IO_ACCEL_INELIGIBLE;
3805
3806         /* proceeding with driver mapping */
3807         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
3808                                 le16_to_cpu(map->metadata_disks_per_row);
3809         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3810                                 le16_to_cpu(map->row_cnt);
3811         map_index = (map_row * total_disks_per_row) + first_column;
3812
3813         switch (dev->raid_level) {
3814         case HPSA_RAID_0:
3815                 break; /* nothing special to do */
3816         case HPSA_RAID_1:
3817                 /* Handles load balance across RAID 1 members.
3818                  * (2-drive R1 and R10 with even # of drives.)
3819                  * Appropriate for SSDs, not optimal for HDDs
3820                  */
3821                 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
3822                 if (dev->offload_to_mirror)
3823                         map_index += le16_to_cpu(map->data_disks_per_row);
3824                 dev->offload_to_mirror = !dev->offload_to_mirror;
3825                 break;
3826         case HPSA_RAID_ADM:
3827                 /* Handles N-way mirrors  (R1-ADM)
3828                  * and R10 with # of drives divisible by 3.
3829                  */
3830                 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
3831
3832                 offload_to_mirror = dev->offload_to_mirror;
3833                 raid_map_helper(map, offload_to_mirror,
3834                                 &map_index, &current_group);
3835                 /* set mirror group to use next time */
3836                 offload_to_mirror =
3837                         (offload_to_mirror >=
3838                         le16_to_cpu(map->layout_map_count) - 1)
3839                         ? 0 : offload_to_mirror + 1;
3840                 dev->offload_to_mirror = offload_to_mirror;
3841                 /* Avoid direct use of dev->offload_to_mirror within this
3842                  * function since multiple threads might simultaneously
3843                  * increment it beyond the range of dev->layout_map_count -1.
3844                  */
3845                 break;
3846         case HPSA_RAID_5:
3847         case HPSA_RAID_6:
3848                 if (le16_to_cpu(map->layout_map_count) <= 1)
3849                         break;
3850
3851                 /* Verify first and last block are in same RAID group */
3852                 r5or6_blocks_per_row =
3853                         le16_to_cpu(map->strip_size) *
3854                         le16_to_cpu(map->data_disks_per_row);
3855                 BUG_ON(r5or6_blocks_per_row == 0);
3856                 stripesize = r5or6_blocks_per_row *
3857                         le16_to_cpu(map->layout_map_count);
3858 #if BITS_PER_LONG == 32
3859                 tmpdiv = first_block;
3860                 first_group = do_div(tmpdiv, stripesize);
3861                 tmpdiv = first_group;
3862                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3863                 first_group = tmpdiv;
3864                 tmpdiv = last_block;
3865                 last_group = do_div(tmpdiv, stripesize);
3866                 tmpdiv = last_group;
3867                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3868                 last_group = tmpdiv;
3869 #else
3870                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3871                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3872 #endif
3873                 if (first_group != last_group)
3874                         return IO_ACCEL_INELIGIBLE;
3875
3876                 /* Verify request is in a single row of RAID 5/6 */
3877 #if BITS_PER_LONG == 32
3878                 tmpdiv = first_block;
3879                 (void) do_div(tmpdiv, stripesize);
3880                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3881                 tmpdiv = last_block;
3882                 (void) do_div(tmpdiv, stripesize);
3883                 r5or6_last_row = r0_last_row = tmpdiv;
3884 #else
3885                 first_row = r5or6_first_row = r0_first_row =
3886                                                 first_block / stripesize;
3887                 r5or6_last_row = r0_last_row = last_block / stripesize;
3888 #endif
3889                 if (r5or6_first_row != r5or6_last_row)
3890                         return IO_ACCEL_INELIGIBLE;
3891
3893                 /* Verify request is in a single column */
3894 #if BITS_PER_LONG == 32
3895                 tmpdiv = first_block;
3896                 first_row_offset = do_div(tmpdiv, stripesize);
3897                 tmpdiv = first_row_offset;
3898                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3899                 r5or6_first_row_offset = first_row_offset;
3900                 tmpdiv = last_block;
3901                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3902                 tmpdiv = r5or6_last_row_offset;
3903                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3904                 tmpdiv = r5or6_first_row_offset;
3905                 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
3906                 first_column = r5or6_first_column = tmpdiv;
3907                 tmpdiv = r5or6_last_row_offset;
3908                 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
3909                 r5or6_last_column = tmpdiv;
3910 #else
3911                 first_row_offset = r5or6_first_row_offset =
3912                         (u32)((first_block % stripesize) %
3913                                                 r5or6_blocks_per_row);
3914
3915                 r5or6_last_row_offset =
3916                         (u32)((last_block % stripesize) %
3917                                                 r5or6_blocks_per_row);
3918
3919                 first_column = r5or6_first_column =
3920                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
3921                 r5or6_last_column =
3922                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
3923 #endif
3924                 if (r5or6_first_column != r5or6_last_column)
3925                         return IO_ACCEL_INELIGIBLE;
3926
3927                 /* Request is eligible */
3928                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3929                         le16_to_cpu(map->row_cnt);
3930
3931                 map_index = (first_group *
3932                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
3933                         (map_row * total_disks_per_row) + first_column;
3934                 break;
3935         default:
3936                 return IO_ACCEL_INELIGIBLE;
3937         }
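        /*
         * Worked example (illustrative numbers, not from real hardware):
         * with strip_size = 256, data_disks_per_row = 3 and
         * layout_map_count = 2, a row holds 256 * 3 = 768 blocks and a
         * full stripe 768 * 2 = 1536.  For first_block = 2000:
         * group = (2000 % 1536) / 768 = 0, row = 2000 / 1536 = 1,
         * row offset = (2000 % 1536) % 768 = 464, column = 464 / 256 = 1.
         * map_row is then derived from the row via parity rotation, and
         * map_index selects the (group, map_row, column) raid map entry.
         */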
3938
3939         if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
3940                 return IO_ACCEL_INELIGIBLE;
3941
3942         c->phys_disk = dev->phys_disk[map_index];
3943
3944         disk_handle = dd[map_index].ioaccel_handle;
3945         disk_block = le64_to_cpu(map->disk_starting_blk) +
3946                         first_row * le16_to_cpu(map->strip_size) +
3947                         (first_row_offset - first_column *
3948                         le16_to_cpu(map->strip_size));
3949         disk_block_cnt = block_cnt;
3950
3951         /* handle differing logical/physical block sizes */
3952         if (map->phys_blk_shift) {
3953                 disk_block <<= map->phys_blk_shift;
3954                 disk_block_cnt <<= map->phys_blk_shift;
3955         }
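        /*
         * Example (hypothetical geometry): a volume with 4096-byte logical
         * blocks on disks with 512-byte sectors has phys_blk_shift = 3, so
         * logical block 100 becomes physical block 800 and the count
         * scales by the same factor of 8.
         */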
3956         BUG_ON(disk_block_cnt > 0xffff);
3957
3958         /* build the new CDB for the physical disk I/O */
3959         if (disk_block > 0xffffffff) {
3960                 cdb[0] = is_write ? WRITE_16 : READ_16;
3961                 cdb[1] = 0;
3962                 cdb[2] = (u8) (disk_block >> 56);
3963                 cdb[3] = (u8) (disk_block >> 48);
3964                 cdb[4] = (u8) (disk_block >> 40);
3965                 cdb[5] = (u8) (disk_block >> 32);
3966                 cdb[6] = (u8) (disk_block >> 24);
3967                 cdb[7] = (u8) (disk_block >> 16);
3968                 cdb[8] = (u8) (disk_block >> 8);
3969                 cdb[9] = (u8) (disk_block);
3970                 cdb[10] = (u8) (disk_block_cnt >> 24);
3971                 cdb[11] = (u8) (disk_block_cnt >> 16);
3972                 cdb[12] = (u8) (disk_block_cnt >> 8);
3973                 cdb[13] = (u8) (disk_block_cnt);
3974                 cdb[14] = 0;
3975                 cdb[15] = 0;
3976                 cdb_len = 16;
3977         } else {
3978                 cdb[0] = is_write ? WRITE_10 : READ_10;
3979                 cdb[1] = 0;
3980                 cdb[2] = (u8) (disk_block >> 24);
3981                 cdb[3] = (u8) (disk_block >> 16);
3982                 cdb[4] = (u8) (disk_block >> 8);
3983                 cdb[5] = (u8) (disk_block);
3984                 cdb[6] = 0;
3985                 cdb[7] = (u8) (disk_block_cnt >> 8);
3986                 cdb[8] = (u8) (disk_block_cnt);
3987                 cdb[9] = 0;
3988                 cdb_len = 10;
3989         }
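        /*
         * Illustration: for a read of disk_block = 0x12345678 with
         * disk_block_cnt = 8, the 10-byte CDB is
         * 28 00 12 34 56 78 00 00 08 00 - LBA big-endian in bytes 2-5,
         * transfer length in bytes 7-8.  Blocks beyond 32 bits force the
         * 16-byte variant above.
         */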
3990         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3991                                                 dev->scsi3addr,
3992                                                 dev->phys_disk[map_index]);
3993 }
3994
3995 /* Submit commands down the "normal" RAID stack path */
3996 static int hpsa_ciss_submit(struct ctlr_info *h,
3997         struct CommandList *c, struct scsi_cmnd *cmd,
3998         unsigned char scsi3addr[])
3999 {
4000         cmd->host_scribble = (unsigned char *) c;
4001         c->cmd_type = CMD_SCSI;
4002         c->scsi_cmd = cmd;
4003         c->Header.ReplyQueue = 0;  /* unused in simple mode */
4004         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4005         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4006
4007         /* Fill in the request block... */
4008
4009         c->Request.Timeout = 0;
4010         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4011         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4012         c->Request.CDBLen = cmd->cmd_len;
4013         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
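        /*
         * Map the midlayer's DMA direction onto the controller's request
         * encoding: TYPE_ATTR_DIR() packs the command type, queue attribute
         * and transfer direction into the single type_attr_dir field (see
         * hpsa_cmd.h for the exact bit layout).
         */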
4014         switch (cmd->sc_data_direction) {
4015         case DMA_TO_DEVICE:
4016                 c->Request.type_attr_dir =
4017                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4018                 break;
4019         case DMA_FROM_DEVICE:
4020                 c->Request.type_attr_dir =
4021                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4022                 break;
4023         case DMA_NONE:
4024                 c->Request.type_attr_dir =
4025                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4026                 break;
4027         case DMA_BIDIRECTIONAL:
4028                 /* This can happen if a buggy application does a scsi passthru
4029                  * and sets both inlen and outlen to non-zero. ( see
4030                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4031                  */
4032
4033                 c->Request.type_attr_dir =
4034                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4035                 /* This is technically wrong, and hpsa controllers should
4036                  * reject it with CMD_INVALID, which is the most correct
4037                  * response, but non-fibre backends appear to let it
4038                  * slide by, and give the same results as if this field
4039                  * were set correctly.  Either way is acceptable for
4040                  * our purposes here.
4041                  */
4042
4043                 break;
4044
4045         default:
4046                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4047                         cmd->sc_data_direction);
4048                 BUG();
4049                 break;
4050         }
4051
4052         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4053                 cmd_free(h, c);
4054                 return SCSI_MLQUEUE_HOST_BUSY;
4055         }
4056         enqueue_cmd_and_start_io(h, c);
4057         /* the cmd'll come back via intr handler in complete_scsi_command()  */
4058         return 0;
4059 }
4060
4061 static void hpsa_command_resubmit_worker(struct work_struct *work)
4062 {
4063         struct scsi_cmnd *cmd;
4064         struct hpsa_scsi_dev_t *dev;
4065         struct CommandList *c =
4066                         container_of(work, struct CommandList, work);
4067
4068         cmd = c->scsi_cmd;
4069         dev = cmd->device->hostdata;
4070         if (!dev) {
4071                 cmd->result = DID_NO_CONNECT << 16;
4072                 cmd->scsi_done(cmd);
4073                 return;
4074         }
4075         if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4076                 /*
4077                  * If we get here, it means dma mapping failed. Try
4078                  * again via scsi mid layer, which will then get
4079                  * SCSI_MLQUEUE_HOST_BUSY.
4080                  */
4081                 cmd->result = DID_IMM_RETRY << 16;
4082                 cmd->scsi_done(cmd);
4083         }
4084 }
4085
4086 /* Running in struct Scsi_Host->host_lock less mode */
4087 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4088 {
4089         struct ctlr_info *h;
4090         struct hpsa_scsi_dev_t *dev;
4091         unsigned char scsi3addr[8];
4092         struct CommandList *c;
4093         int rc = 0;
4094
4095         /* Get the ptr to our adapter structure out of cmd->host. */
4096         h = sdev_to_hba(cmd->device);
4097         dev = cmd->device->hostdata;
4098         if (!dev) {
4099                 cmd->result = DID_NO_CONNECT << 16;
4100                 cmd->scsi_done(cmd);
4101                 return 0;
4102         }
4103         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4104
4105         if (unlikely(lockup_detected(h))) {
4106                 cmd->result = DID_ERROR << 16;
4107                 cmd->scsi_done(cmd);
4108                 return 0;
4109         }
4110         c = cmd_alloc(h);
4111         if (c == NULL) {                        /* trouble... */
4112                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4113                 return SCSI_MLQUEUE_HOST_BUSY;
4114         }
4115         if (unlikely(lockup_detected(h))) {
4116                 cmd->result = DID_ERROR << 16;
4117                 cmd_free(h, c);
4118                 cmd->scsi_done(cmd);
4119                 return 0;
4120         }
4121
4122         /*
4123          * Call alternate submit routine for I/O accelerated commands.
4124          * Retries always go down the normal I/O path.
4125          */
4126         if (likely(cmd->retries == 0 &&
4127                 cmd->request->cmd_type == REQ_TYPE_FS &&
4128                 h->acciopath_status)) {
4129
4130                 cmd->host_scribble = (unsigned char *) c;
4131                 c->cmd_type = CMD_SCSI;
4132                 c->scsi_cmd = cmd;
4133
4134                 if (dev->offload_enabled) {
4135                         rc = hpsa_scsi_ioaccel_raid_map(h, c);
4136                         if (rc == 0)
4137                                 return 0; /* Sent on ioaccel path */
4138                         if (rc < 0) {   /* scsi_dma_map failed. */
4139                                 cmd_free(h, c);
4140                                 return SCSI_MLQUEUE_HOST_BUSY;
4141                         }
4142                 } else if (dev->ioaccel_handle) {
4143                         rc = hpsa_scsi_ioaccel_direct_map(h, c);
4144                         if (rc == 0)
4145                                 return 0; /* Sent on direct map path */
4146                         if (rc < 0) {   /* scsi_dma_map failed. */
4147                                 cmd_free(h, c);
4148                                 return SCSI_MLQUEUE_HOST_BUSY;
4149                         }
4150                 }
4151         }
4152         return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4153 }
4154
4155 static void hpsa_scan_complete(struct ctlr_info *h)
4156 {
4157         unsigned long flags;
4158
4159         spin_lock_irqsave(&h->scan_lock, flags);
4160         h->scan_finished = 1;
4161         wake_up_all(&h->scan_wait_queue);
4162         spin_unlock_irqrestore(&h->scan_lock, flags);
4163 }
4164
4165 static void hpsa_scan_start(struct Scsi_Host *sh)
4166 {
4167         struct ctlr_info *h = shost_to_hba(sh);
4168         unsigned long flags;
4169
4170         /*
4171          * Don't let rescans be initiated on a controller known to be locked
4172          * up.  If the controller locks up *during* a rescan, that thread is
4173          * probably hosed, but at least we can prevent new rescan threads from
4174          * piling up on a locked up controller.
4175          */
4176         if (unlikely(lockup_detected(h)))
4177                 return hpsa_scan_complete(h);
4178
4179         /* wait until any scan already in progress is finished. */
4180         while (1) {
4181                 spin_lock_irqsave(&h->scan_lock, flags);
4182                 if (h->scan_finished)
4183                         break;
4184                 spin_unlock_irqrestore(&h->scan_lock, flags);
4185                 wait_event(h->scan_wait_queue, h->scan_finished);
4186                 /* Note: We don't need to worry about a race between this
4187                  * thread and driver unload because the midlayer will
4188                  * have incremented the reference count, so unload won't
4189                  * happen if we're in here.
4190                  */
4191         }
4192         h->scan_finished = 0; /* mark scan as in progress */
4193         spin_unlock_irqrestore(&h->scan_lock, flags);
4194
4195         if (unlikely(lockup_detected(h)))
4196                 return hpsa_scan_complete(h);
4197
4198         hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4199
4200         hpsa_scan_complete(h);
4201 }
4202
4203 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4204 {
4205         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4206
4207         if (!logical_drive)
4208                 return -ENODEV;
4209
4210         if (qdepth < 1)
4211                 qdepth = 1;
4212         else if (qdepth > logical_drive->queue_depth)
4213                 qdepth = logical_drive->queue_depth;
4214
4215         return scsi_change_queue_depth(sdev, qdepth);
4216 }
4217
4218 static int hpsa_scan_finished(struct Scsi_Host *sh,
4219         unsigned long elapsed_time)
4220 {
4221         struct ctlr_info *h = shost_to_hba(sh);
4222         unsigned long flags;
4223         int finished;
4224
4225         spin_lock_irqsave(&h->scan_lock, flags);
4226         finished = h->scan_finished;
4227         spin_unlock_irqrestore(&h->scan_lock, flags);
4228         return finished;
4229 }
4230
4231 static void hpsa_unregister_scsi(struct ctlr_info *h)
4232 {
4233         /* we are being forcibly unloaded, and may not refuse. */
4234         scsi_remove_host(h->scsi_host);
4235         scsi_host_put(h->scsi_host);
4236         h->scsi_host = NULL;
4237 }
4238
4239 static int hpsa_register_scsi(struct ctlr_info *h)
4240 {
4241         struct Scsi_Host *sh;
4242         int error;
4243
4244         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4245         if (sh == NULL)
4246                 goto fail;
4247
4248         sh->io_port = 0;
4249         sh->n_io_port = 0;
4250         sh->this_id = -1;
4251         sh->max_channel = 3;
4252         sh->max_cmd_len = MAX_COMMAND_SIZE;
4253         sh->max_lun = HPSA_MAX_LUN;
4254         sh->max_id = HPSA_MAX_LUN;
4255         sh->can_queue = h->nr_cmds -
4256                         HPSA_CMDS_RESERVED_FOR_ABORTS -
4257                         HPSA_CMDS_RESERVED_FOR_DRIVER -
4258                         HPSA_MAX_CONCURRENT_PASSTHRUS;
4259         sh->cmd_per_lun = sh->can_queue;
4260         sh->sg_tablesize = h->maxsgentries;
4261         h->scsi_host = sh;
4262         sh->hostdata[0] = (unsigned long) h;
4263         sh->irq = h->intr[h->intr_mode];
4264         sh->unique_id = sh->irq;
4265         error = scsi_add_host(sh, &h->pdev->dev);
4266         if (error)
4267                 goto fail_host_put;
4268         scsi_scan_host(sh);
4269         return 0;
4270
4271  fail_host_put:
4272         dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
4273                 __func__, h->ctlr);
4274         scsi_host_put(sh);
4275         return error;
4276  fail:
4277         dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
4278                 __func__, h->ctlr);
4279         return -ENOMEM;
4280 }
4281
4282 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4283         unsigned char lunaddr[])
4284 {
4285         int rc;
4286         int count = 0;
4287         int waittime = 1; /* seconds */
4288         struct CommandList *c;
4289
4290         c = cmd_alloc(h);
4291         if (!c) {
4292                 dev_warn(&h->pdev->dev,
4293                         "out of memory in wait_for_device_to_become_ready.\n");
4294                 return IO_ERROR;
4295         }
4296
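        /*
         * Retry with exponential backoff: the sleep starts at 1 second and
         * doubles on each attempt while still below
         * HPSA_MAX_WAIT_INTERVAL_SECS, so the delays run 1, 2, 4, ...
         * seconds for at most HPSA_TUR_RETRY_LIMIT Test Unit Readys.
         */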
4297         /* Send test unit ready until device ready, or give up. */
4298         while (count < HPSA_TUR_RETRY_LIMIT) {
4299
4300                 /* Wait for a bit.  do this first, because if we send
4301                  * the TUR right away, the reset will just abort it.
4302                  */
4303                 msleep(1000 * waittime);
4304                 count++;
4305                 rc = 0; /* Device ready. */
4306
4307                 /* Increase wait time with each try, up to a point. */
4308                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4309                         waittime = waittime * 2;
4310
4311                 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4312                 (void) fill_cmd(c, TEST_UNIT_READY, h,
4313                                 NULL, 0, 0, lunaddr, TYPE_CMD);
4314                 hpsa_scsi_do_simple_cmd_core(h, c);
4315                 /* no unmap needed here because no data xfer. */
4316
4317                 if (c->err_info->CommandStatus == CMD_SUCCESS)
4318                         break;
4319
4320                 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4321                         c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4322                         (c->err_info->SenseInfo[2] == NO_SENSE ||
4323                         c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4324                         break;
4325
4326                 dev_warn(&h->pdev->dev,
4327                         "waiting %d secs for device to become ready.\n", waittime);
4328                 rc = 1; /* device not ready. */
4329         }
4330
4331         if (rc)
4332                 dev_warn(&h->pdev->dev, "giving up on device.\n");
4333         else
4334                 dev_warn(&h->pdev->dev, "device is ready.\n");
4335
4336         cmd_free(h, c);
4337         return rc;
4338 }
4339
4340 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4341  * complaining.  Doing a host- or bus-reset can't do anything good here.
4342  */
4343 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4344 {
4345         int rc;
4346         struct ctlr_info *h;
4347         struct hpsa_scsi_dev_t *dev;
4348
4349         /* find the controller to which the command to be aborted was sent */
4350         h = sdev_to_hba(scsicmd->device);
4351         if (h == NULL) /* paranoia */
4352                 return FAILED;
4353
4354         if (lockup_detected(h))
4355                 return FAILED;
4356
4357         dev = scsicmd->device->hostdata;
4358         if (!dev) {
4359                 dev_err(&h->pdev->dev,
4360                         "hpsa_eh_device_reset_handler: device lookup failed.\n");
4361                 return FAILED;
4362         }
4363         dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4364                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4365         /* send a reset to the SCSI LUN which the command was sent to */
4366         rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4367         if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4368                 return SUCCESS;
4369
4370         dev_warn(&h->pdev->dev, "resetting device failed.\n");
4371         return FAILED;
4372 }
4373
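/*
 * Byte-swap each 32-bit half of the 8-byte abort tag.  Example: a tag of
 * 01 02 03 04 05 06 07 08 becomes 04 03 02 01 08 07 06 05.
 */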
4374 static void swizzle_abort_tag(u8 *tag)
4375 {
4376         u8 original_tag[8];
4377
4378         memcpy(original_tag, tag, 8);
4379         tag[0] = original_tag[3];
4380         tag[1] = original_tag[2];
4381         tag[2] = original_tag[1];
4382         tag[3] = original_tag[0];
4383         tag[4] = original_tag[7];
4384         tag[5] = original_tag[6];
4385         tag[6] = original_tag[5];
4386         tag[7] = original_tag[4];
4387 }
4388
4389 static void hpsa_get_tag(struct ctlr_info *h,
4390         struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4391 {
4392         u64 tag;
4393         if (c->cmd_type == CMD_IOACCEL1) {
4394                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4395                         &h->ioaccel_cmd_pool[c->cmdindex];
4396                 tag = le64_to_cpu(cm1->tag);
4397                 *tagupper = cpu_to_le32(tag >> 32);
4398                 *taglower = cpu_to_le32(tag);
4399                 return;
4400         }
4401         if (c->cmd_type == CMD_IOACCEL2) {
4402                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4403                         &h->ioaccel2_cmd_pool[c->cmdindex];
4404                 /* upper tag not used in ioaccel2 mode */
4405                 memset(tagupper, 0, sizeof(*tagupper));
4406                 *taglower = cm2->Tag;
4407                 return;
4408         }
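        /* RAID-path commands: split the 64-bit header tag into halves,
         * e.g. tag 0x1122334455667788 yields tagupper 0x11223344 and
         * taglower 0x55667788.
         */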
4409         tag = le64_to_cpu(c->Header.tag);
4410         *tagupper = cpu_to_le32(tag >> 32);
4411         *taglower = cpu_to_le32(tag);
4412 }
4413
4414 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4415         struct CommandList *abort, int swizzle)
4416 {
4417         int rc = IO_OK;
4418         struct CommandList *c;
4419         struct ErrorInfo *ei;
4420         __le32 tagupper, taglower;
4421
4422         c = cmd_alloc(h);
4423         if (c == NULL) {        /* trouble... */
4424                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4425                 return -ENOMEM;
4426         }
4427
4428         /* fill_cmd can't fail here, no buffer to map */
4429         (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4430                 0, 0, scsi3addr, TYPE_MSG);
4431         if (swizzle)
4432                 swizzle_abort_tag(&c->Request.CDB[4]);
4433         hpsa_scsi_do_simple_cmd_core(h, c);
4434         hpsa_get_tag(h, abort, &taglower, &tagupper);
4435         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4436                 __func__, tagupper, taglower);
4437         /* no unmap needed here because no data xfer. */
4438
4439         ei = c->err_info;
4440         switch (ei->CommandStatus) {
4441         case CMD_SUCCESS:
4442                 break;
4443         case CMD_UNABORTABLE: /* Very common, don't make noise. */
4444                 rc = -1;
4445                 break;
4446         default:
4447                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4448                         __func__, tagupper, taglower);
4449                 hpsa_scsi_interpret_error(h, c);
4450                 rc = -1;
4451                 break;
4452         }
4453         cmd_free(h, c);
4454         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4455                 __func__, tagupper, taglower);
4456         return rc;
4457 }
4458
4459 /* ioaccel2 path firmware cannot handle abort task requests.
4460  * Change abort requests to physical target reset, and send to the
4461  * address of the physical disk used for the ioaccel 2 command.
4462  * Return 0 on success (IO_OK)
4463  *       -1 on failure
4464  */
4465
4466 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4467         unsigned char *scsi3addr, struct CommandList *abort)
4468 {
4469         int rc = IO_OK;
4470         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4471         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4472         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4473         unsigned char *psa = &phys_scsi3addr[0];
4474
4475         /* Get a pointer to the hpsa logical device. */
4476         scmd = abort->scsi_cmd;
4477         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4478         if (dev == NULL) {
4479                 dev_warn(&h->pdev->dev,
4480                         "Cannot abort: no device pointer for command.\n");
4481                 return -1; /* not abortable */
4482         }
4483
4484         if (h->raid_offload_debug > 0)
4485                 dev_info(&h->pdev->dev,
4486                         "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4487                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4488                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4489                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4490
4491         if (!dev->offload_enabled) {
4492                 dev_warn(&h->pdev->dev,
4493                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4494                 return -1; /* not abortable */
4495         }
4496
4497         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4498         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4499                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4500                 return -1; /* not abortable */
4501         }
4502
4503         /* send the reset */
4504         if (h->raid_offload_debug > 0)
4505                 dev_info(&h->pdev->dev,
4506                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4507                         psa[0], psa[1], psa[2], psa[3],
4508                         psa[4], psa[5], psa[6], psa[7]);
4509         rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4510         if (rc != 0) {
4511                 dev_warn(&h->pdev->dev,
4512                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4513                         psa[0], psa[1], psa[2], psa[3],
4514                         psa[4], psa[5], psa[6], psa[7]);
4515                 return rc; /* failed to reset */
4516         }
4517
4518         /* wait for device to recover */
4519         if (wait_for_device_to_become_ready(h, psa) != 0) {
4520                 dev_warn(&h->pdev->dev,
4521                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4522                         psa[0], psa[1], psa[2], psa[3],
4523                         psa[4], psa[5], psa[6], psa[7]);
4524                 return -1;  /* failed to recover */
4525         }
4526
4527         /* device recovered */
4528         dev_info(&h->pdev->dev,
4529                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4530                 psa[0], psa[1], psa[2], psa[3],
4531                 psa[4], psa[5], psa[6], psa[7]);
4532
4533         return rc; /* success */
4534 }
4535
4536 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
4537  * tell which kind we're dealing with, so we send the abort both ways.  There
4538  * shouldn't be any collisions between swizzled and unswizzled tags due to the
4539  * way we construct our tags but we check anyway in case the assumptions which
4540  * make this true someday become false.
4541  */
4542 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4543         unsigned char *scsi3addr, struct CommandList *abort)
4544 {
4545         /* ioaccel mode 2 commands should be aborted via the
4546          * accelerated path, since the RAID path is unaware of these commands,
4547          * but underlying firmware can't handle abort TMF.
4548          * Change abort to physical device reset.
4549          */
4550         if (abort->cmd_type == CMD_IOACCEL2)
4551                 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4552
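        /*
         * Try the unswizzled tag first; the swizzled form is only sent if
         * the first attempt fails.  The && short-circuits, so 0 (success)
         * is returned as soon as either variant is accepted.
         */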
4553         return hpsa_send_abort(h, scsi3addr, abort, 0) &&
4554                         hpsa_send_abort(h, scsi3addr, abort, 1);
4555 }
4556
4557 /* Send an abort for the specified command.
4558  *      If the device and controller support it,
4559  *              send a task abort request.
4560  */
4561 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4562 {
4564         int i, rc;
4565         struct ctlr_info *h;
4566         struct hpsa_scsi_dev_t *dev;
4567         struct CommandList *abort; /* pointer to command to be aborted */
4568         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
4569         char msg[256];          /* For debug messaging. */
4570         int ml = 0;
4571         __le32 tagupper, taglower;
4572         int refcount;
4573
4574         /* Find the controller of the command to be aborted */
4575         h = sdev_to_hba(sc->device);
4576         if (WARN(h == NULL,
4577                         "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4578                 return FAILED;
4579
4580         if (lockup_detected(h))
4581                 return FAILED;
4582
4583         /* Check that controller supports some kind of task abort */
4584         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4585                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4586                 return FAILED;
4587
4588         memset(msg, 0, sizeof(msg));
4589         ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
4590                 h->scsi_host->host_no, sc->device->channel,
4591                 sc->device->id, sc->device->lun);
4592
4593         /* Find the device of the command to be aborted */
4594         dev = sc->device->hostdata;
4595         if (!dev) {
4596                 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4597                                 msg);
4598                 return FAILED;
4599         }
4600
4601         /* Get SCSI command to be aborted */
4602         abort = (struct CommandList *) sc->host_scribble;
4603         if (abort == NULL) {
4604                 /* This can happen if the command already completed. */
4605                 return SUCCESS;
4606         }
4607         refcount = atomic_inc_return(&abort->refcount);
4608         if (refcount == 1) { /* Command is done already. */
4609                 cmd_free(h, abort);
4610                 return SUCCESS;
4611         }
4612         hpsa_get_tag(h, abort, &taglower, &tagupper);
4613         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4614         as  = abort->scsi_cmd;
4615         if (as != NULL)
4616                 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4617                         as->cmnd[0], as->serial_number);
4618         dev_dbg(&h->pdev->dev, "%s\n", msg);
4619         dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4620                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4621         /*
4622          * Command is in flight, or possibly already completed
4623          * by the firmware (but not to the scsi mid layer) but we can't
4624          * distinguish which.  Send the abort down.
4625          */
4626         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4627         if (rc != 0) {
4628                 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4629                 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4630                         h->scsi_host->host_no,
4631                         dev->bus, dev->target, dev->lun);
4632                 cmd_free(h, abort);
4633                 return FAILED;
4634         }
4635         dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4636
4637         /* If the abort(s) above completed and actually aborted the
4638          * command, then the command to be aborted should already be
4639          * completed.  If not, wait around a bit more to see if they
4640          * manage to complete normally.
4641          */
4642 #define ABORT_COMPLETE_WAIT_SECS 30
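        /* The loop below polls every 100 ms, so ABORT_COMPLETE_WAIT_SECS * 10
         * iterations (here 300) cover the full wait window.
         */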
4643         for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4644                 refcount = atomic_read(&abort->refcount);
4645                 if (refcount < 2) {
4646                         cmd_free(h, abort);
4647                         return SUCCESS;
4648                 } else {
4649                         msleep(100);
4650                 }
4651         }
4652         dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4653                 msg, ABORT_COMPLETE_WAIT_SECS);
4654         cmd_free(h, abort);
4655         return FAILED;
4656 }
4657
4658 /*
4659  * For operations that cannot sleep, a command block is allocated at init,
4660  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4661  * which ones are free or in use.  No lock is needed: a per-command
4662  * atomic refcount guards against concurrent claims.  cmd_free() is the complement.
4663  */
4664
4665 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4666 {
4667         struct CommandList *c;
4668         int i;
4670         dma_addr_t cmd_dma_handle, err_dma_handle;
4671         int refcount;
4672         unsigned long offset;
4673
4674         /*
4675          * There is some *extremely* small but non-zero chance that
4676          * multiple threads could get in here, and one thread could
4677          * be scanning through the list of bits looking for a free
4678          * one, but the free ones are always behind him, and other
4679          * threads sneak in behind him and eat them before he can
4680          * get to them, so that while there is always a free one, a
4681          * very unlucky thread might be starved anyway, never able to
4682          * beat the other threads.  In reality, this happens so
4683          * infrequently as to be indistinguishable from never.
4684          */
4685
4686         offset = h->last_allocation; /* benignly racy */
4687         for (;;) {
4688                 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
4689                 if (unlikely(i == h->nr_cmds)) {
4690                         offset = 0;
4691                         continue;
4692                 }
4693                 c = h->cmd_pool + i;
4694                 refcount = atomic_inc_return(&c->refcount);
4695                 if (unlikely(refcount > 1)) {
4696                         cmd_free(h, c); /* already in use */
4697                         offset = (i + 1) % h->nr_cmds;
4698                         continue;
4699                 }
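                /* Claim bit i in the pool bitmap: on a 64-bit kernel,
                 * e.g. i = 70 lands in word 70 / 64 = 1 at bit 70 & 63 = 6.
                 */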
4700                 set_bit(i & (BITS_PER_LONG - 1),
4701                         h->cmd_pool_bits + (i / BITS_PER_LONG));
4702                 break; /* it's ours now. */
4703         }
4704         h->last_allocation = i; /* benignly racy */
4705
4706         /* Zero out all of commandlist except the last field, refcount */
4707         memset(c, 0, offsetof(struct CommandList, refcount));
4708         c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
4709         cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
4710         c->err_info = h->errinfo_pool + i;
4711         memset(c->err_info, 0, sizeof(*c->err_info));
4712         err_dma_handle = h->errinfo_pool_dhandle
4713             + i * sizeof(*c->err_info);
4714
4715         c->cmdindex = i;
4716
4717         c->busaddr = (u32) cmd_dma_handle;
4719         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4720         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4721
4722         c->h = h;
4723         return c;
4724 }
4725
4726 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4727 {
4728         if (atomic_dec_and_test(&c->refcount)) {
4729                 int i;
4730
4731                 i = c - h->cmd_pool;
4732                 clear_bit(i & (BITS_PER_LONG - 1),
4733                           h->cmd_pool_bits + (i / BITS_PER_LONG));
4734         }
4735 }
4736
4737 #ifdef CONFIG_COMPAT
4738
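/*
 * Compat thunks for 32-bit userspace: repack the 32-bit ioctl structs into
 * their native equivalents in user-accessible scratch space (via
 * compat_alloc_user_space()), invoke the regular hpsa_ioctl() handler, then
 * copy the error info back out to the 32-bit caller.
 */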
4739 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4740         void __user *arg)
4741 {
4742         IOCTL32_Command_struct __user *arg32 =
4743             (IOCTL32_Command_struct __user *) arg;
4744         IOCTL_Command_struct arg64;
4745         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4746         int err;
4747         u32 cp;
4748
4749         memset(&arg64, 0, sizeof(arg64));
4750         err = 0;
4751         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4752                            sizeof(arg64.LUN_info));
4753         err |= copy_from_user(&arg64.Request, &arg32->Request,
4754                            sizeof(arg64.Request));
4755         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4756                            sizeof(arg64.error_info));
4757         err |= get_user(arg64.buf_size, &arg32->buf_size);
4758         err |= get_user(cp, &arg32->buf);
4759         arg64.buf = compat_ptr(cp);
4760         err |= copy_to_user(p, &arg64, sizeof(arg64));
4761
4762         if (err)
4763                 return -EFAULT;
4764
4765         err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4766         if (err)
4767                 return err;
4768         err |= copy_in_user(&arg32->error_info, &p->error_info,
4769                          sizeof(arg32->error_info));
4770         if (err)
4771                 return -EFAULT;
4772         return err;
4773 }
4774
4775 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4776         int cmd, void __user *arg)
4777 {
4778         BIG_IOCTL32_Command_struct __user *arg32 =
4779             (BIG_IOCTL32_Command_struct __user *) arg;
4780         BIG_IOCTL_Command_struct arg64;
4781         BIG_IOCTL_Command_struct __user *p =
4782             compat_alloc_user_space(sizeof(arg64));
4783         int err;
4784         u32 cp;
4785
4786         memset(&arg64, 0, sizeof(arg64));
4787         err = 0;
4788         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4789                            sizeof(arg64.LUN_info));
4790         err |= copy_from_user(&arg64.Request, &arg32->Request,
4791                            sizeof(arg64.Request));
4792         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4793                            sizeof(arg64.error_info));
4794         err |= get_user(arg64.buf_size, &arg32->buf_size);
4795         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4796         err |= get_user(cp, &arg32->buf);
4797         arg64.buf = compat_ptr(cp);
4798         err |= copy_to_user(p, &arg64, sizeof(arg64));
4799
4800         if (err)
4801                 return -EFAULT;
4802
4803         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4804         if (err)
4805                 return err;
4806         err |= copy_in_user(&arg32->error_info, &p->error_info,
4807                          sizeof(arg32->error_info));
4808         if (err)
4809                 return -EFAULT;
4810         return err;
4811 }
4812
4813 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4814 {
4815         switch (cmd) {
4816         case CCISS_GETPCIINFO:
4817         case CCISS_GETINTINFO:
4818         case CCISS_SETINTINFO:
4819         case CCISS_GETNODENAME:
4820         case CCISS_SETNODENAME:
4821         case CCISS_GETHEARTBEAT:
4822         case CCISS_GETBUSTYPES:
4823         case CCISS_GETFIRMVER:
4824         case CCISS_GETDRIVVER:
4825         case CCISS_REVALIDVOLS:
4826         case CCISS_DEREGDISK:
4827         case CCISS_REGNEWDISK:
4828         case CCISS_REGNEWD:
4829         case CCISS_RESCANDISK:
4830         case CCISS_GETLUNINFO:
4831                 return hpsa_ioctl(dev, cmd, arg);
4832
4833         case CCISS_PASSTHRU32:
4834                 return hpsa_ioctl32_passthru(dev, cmd, arg);
4835         case CCISS_BIG_PASSTHRU32:
4836                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4837
4838         default:
4839                 return -ENOIOCTLCMD;
4840         }
4841 }
4842 #endif
4843
4844 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4845 {
4846         struct hpsa_pci_info pciinfo;
4847
4848         if (!argp)
4849                 return -EINVAL;
4850         pciinfo.domain = pci_domain_nr(h->pdev->bus);
4851         pciinfo.bus = h->pdev->bus->number;
4852         pciinfo.dev_fn = h->pdev->devfn;
4853         pciinfo.board_id = h->board_id;
4854         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4855                 return -EFAULT;
4856         return 0;
4857 }
4858
4859 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4860 {
4861         DriverVer_type DriverVer;
4862         unsigned char vmaj, vmin, vsubmin;
4863         int rc;
4864
4865         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4866                 &vmaj, &vmin, &vsubmin);
4867         if (rc != 3) {
4868                 dev_info(&h->pdev->dev,
4869                         "driver version string '%s' unrecognized.\n", HPSA_DRIVER_VERSION);
4870                 vmaj = 0;
4871                 vmin = 0;
4872                 vsubmin = 0;
4873         }
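        /* Pack as (major << 16) | (minor << 8) | subminor;
         * e.g. "3.4.4" encodes as 0x030404.
         */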
4874         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4875         if (!argp)
4876                 return -EINVAL;
4877         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4878                 return -EFAULT;
4879         return 0;
4880 }
4881
4882 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4883 {
4884         IOCTL_Command_struct iocommand;
4885         struct CommandList *c;
4886         char *buff = NULL;
4887         u64 temp64;
4888         int rc = 0;
4889
4890         if (!argp)
4891                 return -EINVAL;
4892         if (!capable(CAP_SYS_RAWIO))
4893                 return -EPERM;
4894         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4895                 return -EFAULT;
4896         if ((iocommand.buf_size < 1) &&
4897             (iocommand.Request.Type.Direction != XFER_NONE)) {
4898                 return -EINVAL;
4899         }
4900         if (iocommand.buf_size > 0) {
4901                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4902                 if (buff == NULL)
4903                         return -ENOMEM;
4904                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4905                         /* Copy the data into the buffer we created */
4906                         if (copy_from_user(buff, iocommand.buf,
4907                                 iocommand.buf_size)) {
4908                                 rc = -EFAULT;
4909                                 goto out_kfree;
4910                         }
4911                 } else {
4912                         memset(buff, 0, iocommand.buf_size);
4913                 }
4914         }
4915         c = cmd_alloc(h);
4916         if (c == NULL) {
4917                 rc = -ENOMEM;
4918                 goto out_kfree;
4919         }
4920         /* Fill in the command type */
4921         c->cmd_type = CMD_IOCTL_PEND;
4922         /* Fill in Command Header */
4923         c->Header.ReplyQueue = 0; /* unused in simple mode */
4924         if (iocommand.buf_size > 0) {   /* buffer to fill */
4925                 c->Header.SGList = 1;
4926                 c->Header.SGTotal = cpu_to_le16(1);
4927         } else  { /* no buffers to fill */
4928                 c->Header.SGList = 0;
4929                 c->Header.SGTotal = cpu_to_le16(0);
4930         }
4931         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4932
4933         /* Fill in Request block */
4934         memcpy(&c->Request, &iocommand.Request,
4935                 sizeof(c->Request));
4936
4937         /* Fill in the scatter gather information */
4938         if (iocommand.buf_size > 0) {
4939                 temp64 = pci_map_single(h->pdev, buff,
4940                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4941                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
4942                         c->SG[0].Addr = cpu_to_le64(0);
4943                         c->SG[0].Len = cpu_to_le32(0);
4944                         rc = -ENOMEM;
4945                         goto out;
4946                 }
4947                 c->SG[0].Addr = cpu_to_le64(temp64);
4948                 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
4949                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
4950         }
4951         hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
4952         if (iocommand.buf_size > 0)
4953                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
4954         check_ioctl_unit_attention(h, c);
4955
4956         /* Copy the error information out */
4957         memcpy(&iocommand.error_info, c->err_info,
4958                 sizeof(iocommand.error_info));
4959         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
4960                 rc = -EFAULT;
4961                 goto out;
4962         }
4963         if ((iocommand.Request.Type.Direction & XFER_READ) &&
4964                 iocommand.buf_size > 0) {
4965                 /* Copy the data out of the buffer we created */
4966                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
4967                         rc = -EFAULT;
4968                         goto out;
4969                 }
4970         }
4971 out:
4972         cmd_free(h, c);
4973 out_kfree:
4974         kfree(buff);
4975         return rc;
4976 }
4977
4978 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4979 {
4980         BIG_IOCTL_Command_struct *ioc;
4981         struct CommandList *c;
4982         unsigned char **buff = NULL;
4983         int *buff_size = NULL;
4984         u64 temp64;
4985         BYTE sg_used = 0;
4986         int status = 0;
4987         u32 left;
4988         u32 sz;
4989         BYTE __user *data_ptr;
4990
4991         if (!argp)
4992                 return -EINVAL;
4993         if (!capable(CAP_SYS_RAWIO))
4994                 return -EPERM;
4995         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
4997         if (!ioc) {
4998                 status = -ENOMEM;
4999                 goto cleanup1;
5000         }
5001         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5002                 status = -EFAULT;
5003                 goto cleanup1;
5004         }
5005         if ((ioc->buf_size < 1) &&
5006             (ioc->Request.Type.Direction != XFER_NONE)) {
5007                 status = -EINVAL;
5008                 goto cleanup1;
5009         }
5010         /* Check kmalloc limits  using all SGs */
5011         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5012                 status = -EINVAL;
5013                 goto cleanup1;
5014         }
5015         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5016                 status = -EINVAL;
5017                 goto cleanup1;
5018         }
5019         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5020         if (!buff) {
5021                 status = -ENOMEM;
5022                 goto cleanup1;
5023         }
5024         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5025         if (!buff_size) {
5026                 status = -ENOMEM;
5027                 goto cleanup1;
5028         }
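        /*
         * Carve the user buffer into malloc_size-sized chunks, one SG entry
         * each.  Illustration (values assumed): buf_size = 144 KiB with
         * malloc_size = 64 KiB yields three chunks of 64, 64 and 16 KiB.
         */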
5029         left = ioc->buf_size;
5030         data_ptr = ioc->buf;
5031         while (left) {
5032                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5033                 buff_size[sg_used] = sz;
5034                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5035                 if (buff[sg_used] == NULL) {
5036                         status = -ENOMEM;
5037                         goto cleanup1;
5038                 }
5039                 if (ioc->Request.Type.Direction & XFER_WRITE) {
5040                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5041                                 status = -EFAULT;
5042                                 goto cleanup1;
5043                         }
5044                 } else
5045                         memset(buff[sg_used], 0, sz);
5046                 left -= sz;
5047                 data_ptr += sz;
5048                 sg_used++;
5049         }
5050         c = cmd_alloc(h);
5051         if (c == NULL) {
5052                 status = -ENOMEM;
5053                 goto cleanup1;
5054         }
5055         c->cmd_type = CMD_IOCTL_PEND;
5056         c->Header.ReplyQueue = 0;
5057         c->Header.SGList = (u8) sg_used;
5058         c->Header.SGTotal = cpu_to_le16(sg_used);
5059         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5060         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5061         if (ioc->buf_size > 0) {
5062                 int i;
5063                 for (i = 0; i < sg_used; i++) {
5064                         temp64 = pci_map_single(h->pdev, buff[i],
5065                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
5066                         if (dma_mapping_error(&h->pdev->dev,
5067                                                         (dma_addr_t) temp64)) {
5068                                 c->SG[i].Addr = cpu_to_le64(0);
5069                                 c->SG[i].Len = cpu_to_le32(0);
5070                                 hpsa_pci_unmap(h->pdev, c, i,
5071                                         PCI_DMA_BIDIRECTIONAL);
5072                                 status = -ENOMEM;
5073                                 goto cleanup0;
5074                         }
5075                         c->SG[i].Addr = cpu_to_le64(temp64);
5076                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
5077                         c->SG[i].Ext = cpu_to_le32(0);
5078                 }
5079                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5080         }
5081         hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5082         if (sg_used)
5083                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5084         check_ioctl_unit_attention(h, c);
5085         /* Copy the error information out */
5086         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5087         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5088                 status = -EFAULT;
5089                 goto cleanup0;
5090         }
5091         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5092                 int i;
5093
5094                 /* Copy the data out of the buffer we created */
5095                 BYTE __user *ptr = ioc->buf;
5096                 for (i = 0; i < sg_used; i++) {
5097                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
5098                                 status = -EFAULT;
5099                                 goto cleanup0;
5100                         }
5101                         ptr += buff_size[i];
5102                 }
5103         }
5104         status = 0;
5105 cleanup0:
5106         cmd_free(h, c);
5107 cleanup1:
5108         if (buff) {
5109                 int i;
5110
5111                 for (i = 0; i < sg_used; i++)
5112                         kfree(buff[i]);
5113                 kfree(buff);
5114         }
5115         kfree(buff_size);
5116         kfree(ioc);
5117         return status;
5118 }
5119
5120 static void check_ioctl_unit_attention(struct ctlr_info *h,
5121         struct CommandList *c)
5122 {
5123         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5124                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5125                 (void) check_for_unit_attention(h, c);
5126 }
5127
5128 /*
5129  * ioctl
5130  */
5131 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5132 {
5133         struct ctlr_info *h;
5134         void __user *argp = (void __user *)arg;
5135         int rc;
5136
5137         h = sdev_to_hba(dev);
5138
5139         switch (cmd) {
5140         case CCISS_DEREGDISK:
5141         case CCISS_REGNEWDISK:
5142         case CCISS_REGNEWD:
5143                 hpsa_scan_start(h->scsi_host);
5144                 return 0;
5145         case CCISS_GETPCIINFO:
5146                 return hpsa_getpciinfo_ioctl(h, argp);
5147         case CCISS_GETDRIVVER:
5148                 return hpsa_getdrivver_ioctl(h, argp);
5149         case CCISS_PASSTHRU:
5150                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5151                         return -EAGAIN;
5152                 rc = hpsa_passthru_ioctl(h, argp);
5153                 atomic_inc(&h->passthru_cmds_avail);
5154                 return rc;
5155         case CCISS_BIG_PASSTHRU:
5156                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5157                         return -EAGAIN;
5158                 rc = hpsa_big_passthru_ioctl(h, argp);
5159                 atomic_inc(&h->passthru_cmds_avail);
5160                 return rc;
5161         default:
5162                 return -ENOTTY;
5163         }
5164 }
5165
5166 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5167                                 u8 reset_type)
5168 {
5169         struct CommandList *c;
5170
5171         c = cmd_alloc(h);
5172         if (!c)
5173                 return -ENOMEM;
5174         /* fill_cmd can't fail here, no data buffer to map */
5175         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5176                 RAID_CTLR_LUNID, TYPE_MSG);
5177         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5178         c->waiting = NULL;
5179         enqueue_cmd_and_start_io(h, c);
5180         /* Don't wait for completion, the reset won't complete.  Don't free
5181          * the command either.  This is the last command we will send before
5182          * re-initializing everything, so it doesn't matter and won't leak.
5183          */
5184         return 0;
5185 }
5186
5187 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5188         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5189         int cmd_type)
5190 {
5191         int pci_dir = XFER_NONE;
5192         struct CommandList *a; /* for commands to be aborted */
5193
5194         c->cmd_type = CMD_IOCTL_PEND;
5195         c->Header.ReplyQueue = 0;
5196         if (buff != NULL && size > 0) {
5197                 c->Header.SGList = 1;
5198                 c->Header.SGTotal = cpu_to_le16(1);
5199         } else {
5200                 c->Header.SGList = 0;
5201                 c->Header.SGTotal = cpu_to_le16(0);
5202         }
5203         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5204
5205         if (cmd_type == TYPE_CMD) {
5206                 switch (cmd) {
5207                 case HPSA_INQUIRY:
5208                         /* are we trying to read a vital product page */
5209                         if (page_code & VPD_PAGE) {
5210                                 c->Request.CDB[1] = 0x01;
5211                                 c->Request.CDB[2] = (page_code & 0xff);
5212                         }
5213                         c->Request.CDBLen = 6;
5214                         c->Request.type_attr_dir =
5215                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5216                         c->Request.Timeout = 0;
5217                         c->Request.CDB[0] = HPSA_INQUIRY;
5218                         c->Request.CDB[4] = size & 0xFF;
5219                         break;
5220                 case HPSA_REPORT_LOG:
5221                 case HPSA_REPORT_PHYS:
5222                         /* Talking to the controller, so it's a physical command:
5223                            mode = 00, target = 0.  Nothing to write.
5224                          */
5225                         c->Request.CDBLen = 12;
5226                         c->Request.type_attr_dir =
5227                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5228                         c->Request.Timeout = 0;
5229                         c->Request.CDB[0] = cmd;
5230                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5231                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5232                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5233                         c->Request.CDB[9] = size & 0xFF;
5234                         break;
5235                 case HPSA_CACHE_FLUSH:
5236                         c->Request.CDBLen = 12;
5237                         c->Request.type_attr_dir =
5238                                         TYPE_ATTR_DIR(cmd_type,
5239                                                 ATTR_SIMPLE, XFER_WRITE);
5240                         c->Request.Timeout = 0;
5241                         c->Request.CDB[0] = BMIC_WRITE;
5242                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5243                         c->Request.CDB[7] = (size >> 8) & 0xFF;
5244                         c->Request.CDB[8] = size & 0xFF;
5245                         break;
5246                 case TEST_UNIT_READY:
5247                         c->Request.CDBLen = 6;
5248                         c->Request.type_attr_dir =
5249                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5250                         c->Request.Timeout = 0;
5251                         break;
5252                 case HPSA_GET_RAID_MAP:
5253                         c->Request.CDBLen = 12;
5254                         c->Request.type_attr_dir =
5255                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5256                         c->Request.Timeout = 0;
5257                         c->Request.CDB[0] = HPSA_CISS_READ;
5258                         c->Request.CDB[1] = cmd;
5259                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5260                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5261                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5262                         c->Request.CDB[9] = size & 0xFF;
5263                         break;
5264                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5265                         c->Request.CDBLen = 10;
5266                         c->Request.type_attr_dir =
5267                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5268                         c->Request.Timeout = 0;
5269                         c->Request.CDB[0] = BMIC_READ;
5270                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5271                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5272                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5273                         break;
5274                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
5275                         c->Request.CDBLen = 10;
5276                         c->Request.type_attr_dir =
5277                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5278                         c->Request.Timeout = 0;
5279                         c->Request.CDB[0] = BMIC_READ;
5280                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
5281                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5282                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5283                         break;
5284                 default:
5285                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5286                         BUG();
5287                         return -1;
5288                 }
5289         } else if (cmd_type == TYPE_MSG) {
5290                 switch (cmd) {
5291
5292                 case HPSA_DEVICE_RESET_MSG:
5293                         c->Request.CDBLen = 16;
5294                         c->Request.type_attr_dir =
5295                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5296                         c->Request.Timeout = 0; /* Don't time out */
5297                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5298                         c->Request.CDB[0] =  cmd;
5299                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5300                         /* If bytes 4-7 are zero, it means reset the */
5301                         /* LunID device */
5302                         c->Request.CDB[4] = 0x00;
5303                         c->Request.CDB[5] = 0x00;
5304                         c->Request.CDB[6] = 0x00;
5305                         c->Request.CDB[7] = 0x00;
5306                         break;
5307                 case HPSA_ABORT_MSG:
5308                         a = buff;       /* point to command to be aborted */
5309                         dev_dbg(&h->pdev->dev,
5310                                 "Abort Tag:0x%016llx request Tag:0x%016llx\n",
5311                                 a->Header.tag, c->Header.tag);
5312                         c->Request.CDBLen = 16;
5313                         c->Request.type_attr_dir =
5314                                         TYPE_ATTR_DIR(cmd_type,
5315                                                 ATTR_SIMPLE, XFER_WRITE);
5316                         c->Request.Timeout = 0; /* Don't time out */
5317                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5318                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5319                         c->Request.CDB[2] = 0x00; /* reserved */
5320                         c->Request.CDB[3] = 0x00; /* reserved */
5321                         /* Tag to abort goes in CDB[4]-CDB[11] */
5322                         memcpy(&c->Request.CDB[4], &a->Header.tag,
5323                                 sizeof(a->Header.tag));
5324                         c->Request.CDB[12] = 0x00; /* reserved */
5325                         c->Request.CDB[13] = 0x00; /* reserved */
5326                         c->Request.CDB[14] = 0x00; /* reserved */
5327                         c->Request.CDB[15] = 0x00; /* reserved */
5328                         break;
5329                 default:
5330                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
5331                                 cmd);
5332                         BUG();
5333                 }
5334         } else {
5335                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5336                 BUG();
5337         }
5338
5339         switch (GET_DIR(c->Request.type_attr_dir)) {
5340         case XFER_READ:
5341                 pci_dir = PCI_DMA_FROMDEVICE;
5342                 break;
5343         case XFER_WRITE:
5344                 pci_dir = PCI_DMA_TODEVICE;
5345                 break;
5346         case XFER_NONE:
5347                 pci_dir = PCI_DMA_NONE;
5348                 break;
5349         default:
5350                 pci_dir = PCI_DMA_BIDIRECTIONAL;
5351         }
5352         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5353                 return -1;
5354         return 0;
5355 }
5356
5357 /*
5358  * Map (physical) PCI mem into (virtual) kernel space
5359  */
5360 static void __iomem *remap_pci_mem(ulong base, ulong size)
5361 {
5362         ulong page_base = ((ulong) base) & PAGE_MASK;
5363         ulong page_offs = ((ulong) base) - page_base;
5364         void __iomem *page_remapped = ioremap_nocache(page_base,
5365                 page_offs + size);
5366
5367         return page_remapped ? (page_remapped + page_offs) : NULL;
5368 }
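/*
 * Note on the arithmetic above: the physical base is rounded down to a page
 * boundary (presumably to keep the mapping page-aligned), offset + size
 * bytes are mapped, and the returned pointer is bumped back up by the
 * offset so callers see the address they asked for.
 */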
5369
5370 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5371 {
5372         return h->access.command_completed(h, q);
5373 }
5374
5375 static inline bool interrupt_pending(struct ctlr_info *h)
5376 {
5377         return h->access.intr_pending(h);
5378 }
5379
5380 static inline long interrupt_not_for_us(struct ctlr_info *h)
5381 {
5382         return (h->access.intr_pending(h) == 0) ||
5383                 (h->interrupts_enabled == 0);
5384 }
5385
5386 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5387         u32 raw_tag)
5388 {
5389         if (unlikely(tag_index >= h->nr_cmds)) {
5390                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5391                 return 1;
5392         }
5393         return 0;
5394 }
5395
5396 static inline void finish_cmd(struct CommandList *c)
5397 {
5398         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5399         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5400                         || c->cmd_type == CMD_IOACCEL2))
5401                 complete_scsi_command(c);
5402         else if (c->cmd_type == CMD_IOCTL_PEND)
5403                 complete(c->waiting);
5404 }
5405
5406
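/*
 * Completed tags carry low-order status/error bits set by the hardware.
 * In performant mode the command index lives at or above DIRECT_LOOKUP_SHIFT,
 * so everything below that bit is masked off; in simple mode only the low
 * two bits are error bits.
 */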
5407 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5408 {
5409 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5410 #define HPSA_SIMPLE_ERROR_BITS 0x03
5411         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5412                 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5413         return tag & ~HPSA_PERF_ERROR_BITS;
5414 }
5415
5416 /* process completion of an indexed ("direct lookup") command */
5417 static inline void process_indexed_cmd(struct ctlr_info *h,
5418         u32 raw_tag)
5419 {
5420         u32 tag_index;
5421         struct CommandList *c;
5422
5423         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5424         if (!bad_tag(h, tag_index, raw_tag)) {
5425                 c = h->cmd_pool + tag_index;
5426                 finish_cmd(c);
5427         }
5428 }
5429
5430 /* Some controllers, like p400, will give us one interrupt
5431  * after a soft reset, even if we turned interrupts off.
5432  * Only need to check for this in the hpsa_xxx_discard_completions
5433  * functions.
5434  */
5435 static int ignore_bogus_interrupt(struct ctlr_info *h)
5436 {
5437         if (likely(!reset_devices))
5438                 return 0;
5439
5440         if (likely(h->interrupts_enabled))
5441                 return 0;
5442
5443         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5444                 "(known firmware bug.)  Ignoring.\n");
5445
5446         return 1;
5447 }
5448
5449 /*
5450  * Convert &h->q[x] (passed to interrupt handlers) back to h.
5451  * Relies on (h->q[x] == x) being true for x such that
5452  * 0 <= x < MAX_REPLY_QUEUES.
5453  */
5454 static struct ctlr_info *queue_to_hba(u8 *queue)
5455 {
5456         return container_of((queue - *queue), struct ctlr_info, q[0]);
5457 }
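/*
 * Unpacking the arithmetic above: queue points at h->q[x] and *queue == x,
 * so (queue - *queue) is &h->q[0], from which container_of() recovers h.
 */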
5458
5459 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5460 {
5461         struct ctlr_info *h = queue_to_hba(queue);
5462         u8 q = *(u8 *) queue;
5463         u32 raw_tag;
5464
5465         if (ignore_bogus_interrupt(h))
5466                 return IRQ_NONE;
5467
5468         if (interrupt_not_for_us(h))
5469                 return IRQ_NONE;
5470         h->last_intr_timestamp = get_jiffies_64();
5471         while (interrupt_pending(h)) {
5472                 raw_tag = get_next_completion(h, q);
5473                 while (raw_tag != FIFO_EMPTY)
5474                         raw_tag = next_command(h, q);
5475         }
5476         return IRQ_HANDLED;
5477 }
5478
5479 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5480 {
5481         struct ctlr_info *h = queue_to_hba(queue);
5482         u32 raw_tag;
5483         u8 q = *(u8 *) queue;
5484
5485         if (ignore_bogus_interrupt(h))
5486                 return IRQ_NONE;
5487
5488         h->last_intr_timestamp = get_jiffies_64();
5489         raw_tag = get_next_completion(h, q);
5490         while (raw_tag != FIFO_EMPTY)
5491                 raw_tag = next_command(h, q);
5492         return IRQ_HANDLED;
5493 }
5494
5495 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5496 {
5497         struct ctlr_info *h = queue_to_hba((u8 *) queue);
5498         u32 raw_tag;
5499         u8 q = *(u8 *) queue;
5500
5501         if (interrupt_not_for_us(h))
5502                 return IRQ_NONE;
5503         h->last_intr_timestamp = get_jiffies_64();
5504         while (interrupt_pending(h)) {
5505                 raw_tag = get_next_completion(h, q);
5506                 while (raw_tag != FIFO_EMPTY) {
5507                         process_indexed_cmd(h, raw_tag);
5508                         raw_tag = next_command(h, q);
5509                 }
5510         }
5511         return IRQ_HANDLED;
5512 }
5513
5514 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5515 {
5516         struct ctlr_info *h = queue_to_hba(queue);
5517         u32 raw_tag;
5518         u8 q = *(u8 *) queue;
5519
5520         h->last_intr_timestamp = get_jiffies_64();
5521         raw_tag = get_next_completion(h, q);
5522         while (raw_tag != FIFO_EMPTY) {
5523                 process_indexed_cmd(h, raw_tag);
5524                 raw_tag = next_command(h, q);
5525         }
5526         return IRQ_HANDLED;
5527 }
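/*
 * Of the four interrupt handlers above, the *_discard_completions variants
 * drain and throw away completions (used while recovering from a reset,
 * see ignore_bogus_interrupt()); the INTx variants must first check that a
 * pending interrupt is really ours, a check MSI/MSI-X delivery makes
 * unnecessary.
 */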
5528
5529 /* Send a message CDB to the firmware. Careful, this only works
5530  * in simple mode, not performant mode due to the tag lookup.
5531  * We only ever use this immediately after a controller reset.
5532  */
5533 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5534                         unsigned char type)
5535 {
5536         struct Command {
5537                 struct CommandListHeader CommandHeader;
5538                 struct RequestBlock Request;
5539                 struct ErrDescriptor ErrorDescriptor;
5540         };
5541         struct Command *cmd;
5542         /* room for the ErrorInfo that ErrorDescriptor below points at */
5543         static const size_t cmd_sz = sizeof(*cmd) + sizeof(struct ErrorInfo);
5544         dma_addr_t paddr64;
5545         __le32 paddr32;
5546         u32 tag;
5547         void __iomem *vaddr;
5548         int i, err;
5549
5550         vaddr = pci_ioremap_bar(pdev, 0);
5551         if (vaddr == NULL)
5552                 return -ENOMEM;
5553
5554         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5555          * CCISS commands, so they must be allocated from the lower 4GiB of
5556          * memory.
5557          */
5558         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5559         if (err) {
5560                 iounmap(vaddr);
5561                 return err;
5562         }
5563
5564         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5565         if (cmd == NULL) {
5566                 iounmap(vaddr);
5567                 return -ENOMEM;
5568         }
5569
5570         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
5571          * although there's no guarantee, we assume that the address is at
5572          * least 4-byte aligned (most likely, it's page-aligned).
5573          */
5574         paddr32 = cpu_to_le32(paddr64);
5575
5576         cmd->CommandHeader.ReplyQueue = 0;
5577         cmd->CommandHeader.SGList = 0;
5578         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5579         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
5580         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5581
5582         cmd->Request.CDBLen = 16;
5583         cmd->Request.type_attr_dir =
5584                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5585         cmd->Request.Timeout = 0; /* Don't time out */
5586         cmd->Request.CDB[0] = opcode;
5587         cmd->Request.CDB[1] = type;
5588         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5589         cmd->ErrorDescriptor.Addr =
5590                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
5591         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5592
5593         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
5594
5595         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5596                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5597                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
5598                         break;
5599                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5600         }
5601
5602         iounmap(vaddr);
5603
5604         /* we leak the DMA buffer here ... no choice since the controller could
5605          *  still complete the command.
5606          */
5607         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5608                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5609                         opcode, type);
5610                 return -ETIMEDOUT;
5611         }
5612
5613         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5614
5615         if (tag & HPSA_ERROR_BIT) {
5616                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5617                         opcode, type);
5618                 return -EIO;
5619         }
5620
5621         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5622                 opcode, type);
5623         return 0;
5624 }
5625
5626 #define hpsa_noop(p) hpsa_message(p, 3, 0)
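/* Message opcode 3, type 0 is the no-op (hence the name); it is used below
 * to poll whether the controller is answering again after a reset.
 */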
5627
5628 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5629         void __iomem *vaddr, u32 use_doorbell)
5630 {
5632         if (use_doorbell) {
5633                 /* For everything after the P600, the PCI power state method
5634                  * of resetting the controller doesn't work, so we have this
5635                  * other way using the doorbell register.
5636                  */
5637                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5638                 writel(use_doorbell, vaddr + SA5_DOORBELL);
5639
5640                 /* PMC hardware guys tell us we need a 10 second delay after
5641                  * doorbell reset and before any attempt to talk to the board
5642                  * at all to ensure that this actually works and doesn't fall
5643                  * over in some weird corner cases.
5644                  */
5645                 msleep(10000);
5646         } else { /* Try to do it the PCI power state way */
5647
5648                 /* Quoting from the Open CISS Specification: "The Power
5649                  * Management Control/Status Register (CSR) controls the power
5650                  * state of the device.  The normal operating state is D0,
5651                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
5652                  * the controller, place the interface device in D3 then to D0,
5653                  * this causes a secondary PCI reset which will reset the
5654                  * controller." */
5655
5656                 int rc = 0;
5657
5658                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5659
5660                 /* enter the D3hot power management state */
5661                 rc = pci_set_power_state(pdev, PCI_D3hot);
5662                 if (rc)
5663                         return rc;
5664
5665                 msleep(500);
5666
5667                 /* enter the D0 power management state */
5668                 rc = pci_set_power_state(pdev, PCI_D0);
5669                 if (rc)
5670                         return rc;
5671
5672                 /*
5673                  * The P600 requires a small delay when changing states.
5674                  * Otherwise we may think the board did not reset and we bail.
5675                  * This is for kdump only and is particular to the P600.
5676                  */
5677                 msleep(500);
5678         }
5679         return 0;
5680 }
5681
5682 static void init_driver_version(char *driver_version, int len)
5683 {
5684         memset(driver_version, 0, len);
5685         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5686 }
5687
5688 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5689 {
5690         char *driver_version;
5691         int i, size = sizeof(cfgtable->driver_version);
5692
5693         driver_version = kmalloc(size, GFP_KERNEL);
5694         if (!driver_version)
5695                 return -ENOMEM;
5696
5697         init_driver_version(driver_version, size);
5698         for (i = 0; i < size; i++)
5699                 writeb(driver_version[i], &cfgtable->driver_version[i]);
5700         kfree(driver_version);
5701         return 0;
5702 }
5703
5704 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5705                                           unsigned char *driver_ver)
5706 {
5707         int i;
5708
5709         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5710                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5711 }
5712
5713 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5714 {
5716         char *driver_ver, *old_driver_ver;
5717         int rc, size = sizeof(cfgtable->driver_version);
5718
5719         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5720         if (!old_driver_ver)
5721                 return -ENOMEM;
5722         driver_ver = old_driver_ver + size;
5723
5724         /* After a reset, the 32 bytes of "driver version" in the cfgtable
5725          * should have been changed, otherwise we know the reset failed.
5726          */
5727         init_driver_version(old_driver_ver, size);
5728         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5729         rc = !memcmp(driver_ver, old_driver_ver, size);
5730         kfree(old_driver_ver);
5731         return rc;
5732 }
5733 /* This does a hard reset of the controller using PCI power management
5734  * states or the doorbell register.
5735  */
5736 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5737 {
5738         u64 cfg_offset;
5739         u32 cfg_base_addr;
5740         u64 cfg_base_addr_index;
5741         void __iomem *vaddr;
5742         unsigned long paddr;
5743         u32 misc_fw_support;
5744         int rc;
5745         struct CfgTable __iomem *cfgtable;
5746         u32 use_doorbell;
5747         u32 board_id;
5748         u16 command_register;
5749
5750         /* For controllers as old as the P600, this is very nearly
5751          * the same thing as
5752          *
5753          * pci_save_state(pci_dev);
5754          * pci_set_power_state(pci_dev, PCI_D3hot);
5755          * pci_set_power_state(pci_dev, PCI_D0);
5756          * pci_restore_state(pci_dev);
5757          *
5758          * For controllers newer than the P600, the pci power state
5759          * method of resetting doesn't work so we have another way
5760          * using the doorbell register.
5761          */
5762
5763         rc = hpsa_lookup_board_id(pdev, &board_id);
5764         if (rc < 0) {
5765                 dev_warn(&pdev->dev, "Board ID not found\n");
5766                 return rc;
5767         }
5768         if (!ctlr_is_resettable(board_id)) {
5769                 dev_warn(&pdev->dev, "Controller not resettable\n");
5770                 return -ENODEV;
5771         }
5772
5773         /* if controller is soft- but not hard resettable... */
5774         if (!ctlr_is_hard_resettable(board_id))
5775                 return -ENOTSUPP; /* try soft reset later. */
5776
5777         /* Save the PCI command register */
5778         pci_read_config_word(pdev, 4, &command_register);
5779         pci_save_state(pdev);
5780
5781         /* find the first memory BAR, so we can find the cfg table */
5782         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5783         if (rc)
5784                 return rc;
5785         vaddr = remap_pci_mem(paddr, 0x250);
5786         if (!vaddr)
5787                 return -ENOMEM;
5788
5789         /* find cfgtable in order to check if reset via doorbell is supported */
5790         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5791                                         &cfg_base_addr_index, &cfg_offset);
5792         if (rc)
5793                 goto unmap_vaddr;
5794         cfgtable = remap_pci_mem(pci_resource_start(pdev,
5795                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5796         if (!cfgtable) {
5797                 rc = -ENOMEM;
5798                 goto unmap_vaddr;
5799         }
5800         rc = write_driver_ver_to_cfgtable(cfgtable);
5801         if (rc)
5802                 goto unmap_cfgtable;
5803
5804         /* If reset via doorbell register is supported, use that.
5805          * There are two such methods.  Favor the newest method.
5806          */
5807         misc_fw_support = readl(&cfgtable->misc_fw_support);
5808         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5809         if (use_doorbell) {
5810                 use_doorbell = DOORBELL_CTLR_RESET2;
5811         } else {
5812                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5813                 if (use_doorbell) {
5814                         dev_warn(&pdev->dev,
5815                                 "Soft reset not supported. Firmware update is required.\n");
5816                         rc = -ENOTSUPP; /* try soft reset */
5817                         goto unmap_cfgtable;
5818                 }
5819         }
5820
5821         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5822         if (rc)
5823                 goto unmap_cfgtable;
5824
5825         pci_restore_state(pdev);
5826         pci_write_config_word(pdev, 4, command_register);
5827
5828         /* Some devices (notably the HP Smart Array 5i Controller)
5829            need a little pause here */
5830         msleep(HPSA_POST_RESET_PAUSE_MSECS);
5831
5832         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
5833         if (rc) {
5834                 dev_warn(&pdev->dev,
5835                         "Failed waiting for board to become ready after hard reset\n");
5836                 goto unmap_cfgtable;
5837         }
5838
5839         rc = controller_reset_failed(cfgtable);
5840         if (rc < 0)
5841                 goto unmap_cfgtable;
5842         if (rc) {
5843                 dev_warn(&pdev->dev, "Unable to successfully reset "
5844                         "controller. Will try soft reset.\n");
5845                 rc = -ENOTSUPP;
5846         } else {
5847                 dev_info(&pdev->dev, "board ready after hard reset.\n");
5848         }
5849
5850 unmap_cfgtable:
5851         iounmap(cfgtable);
5852
5853 unmap_vaddr:
5854         iounmap(vaddr);
5855         return rc;
5856 }
5857
5858 /*
5859  * We cannot read the structure directly; for portability we must use
5860  * the I/O accessor functions.
5861  * This is for debug only.
5862  */
5863 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
5864 {
5865 #ifdef HPSA_DEBUG
5866         int i;
5867         char temp_name[17];
5868
5869         dev_info(dev, "Controller Configuration information\n");
5870         dev_info(dev, "------------------------------------\n");
5871         for (i = 0; i < 4; i++)
5872                 temp_name[i] = readb(&(tb->Signature[i]));
5873         temp_name[4] = '\0';
5874         dev_info(dev, "   Signature = %s\n", temp_name);
5875         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
5876         dev_info(dev, "   Transport methods supported = 0x%x\n",
5877                readl(&(tb->TransportSupport)));
5878         dev_info(dev, "   Transport methods active = 0x%x\n",
5879                readl(&(tb->TransportActive)));
5880         dev_info(dev, "   Requested transport Method = 0x%x\n",
5881                readl(&(tb->HostWrite.TransportRequest)));
5882         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
5883                readl(&(tb->HostWrite.CoalIntDelay)));
5884         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
5885                readl(&(tb->HostWrite.CoalIntCount)));
5886         dev_info(dev, "   Max outstanding commands = %d\n",
5887                readl(&(tb->CmdsOutMax)));
5888         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
5889         for (i = 0; i < 16; i++)
5890                 temp_name[i] = readb(&(tb->ServerName[i]));
5891         temp_name[16] = '\0';
5892         dev_info(dev, "   Server Name = %s\n", temp_name);
5893         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
5894                 readl(&(tb->HeartBeat)));
5895 #endif                          /* HPSA_DEBUG */
5896 }
5897
5898 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
5899 {
5900         int i, offset, mem_type, bar_type;
5901
5902         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
5903                 return 0;
5904         offset = 0;
5905         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5906                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
5907                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
5908                         offset += 4;
5909                 else {
5910                         mem_type = pci_resource_flags(pdev, i) &
5911                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
5912                         switch (mem_type) {
5913                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
5914                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
5915                                 offset += 4;    /* 32 bit */
5916                                 break;
5917                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
5918                                 offset += 8;
5919                                 break;
5920                         default:        /* reserved in PCI 2.2 */
5921                                 dev_warn(&pdev->dev,
5922                                        "base address is invalid\n");
5923                                 return -1;
5924                                 break;
5925                         }
5926                 }
5927                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
5928                         return i + 1;
5929         }
5930         return -1;
5931 }
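/*
 * The loop above re-derives which resource a config-space BAR register
 * refers to: each I/O or 32-bit memory BAR consumes 4 bytes of config
 * space and each 64-bit memory BAR consumes 8, so the accumulated offset
 * can be matched against (pci_bar_addr - PCI_BASE_ADDRESS_0).
 */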
5932
5933 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
5934  * controllers that are capable. If not, we use legacy INTx mode.
5935  */
5936
5937 static void hpsa_interrupt_mode(struct ctlr_info *h)
5938 {
5939 #ifdef CONFIG_PCI_MSI
5940         int err, i;
5941         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
5942
5943         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
5944                 hpsa_msix_entries[i].vector = 0;
5945                 hpsa_msix_entries[i].entry = i;
5946         }
5947
5948         /* Some boards advertise MSI but don't really support it */
5949         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
5950             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
5951                 goto default_int_mode;
5952         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
5953                 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
5954                 h->msix_vector = MAX_REPLY_QUEUES;
5955                 if (h->msix_vector > num_online_cpus())
5956                         h->msix_vector = num_online_cpus();
5957                 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
5958                                             1, h->msix_vector);
5959                 if (err < 0) {
5960                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
5961                         h->msix_vector = 0;
5962                         goto single_msi_mode;
5963                 } else if (err < h->msix_vector) {
5964                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
5965                                "available\n", err);
5966                 }
5967                 h->msix_vector = err;
5968                 for (i = 0; i < h->msix_vector; i++)
5969                         h->intr[i] = hpsa_msix_entries[i].vector;
5970                 return;
5971         }
5972 single_msi_mode:
5973         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
5974                 dev_info(&h->pdev->dev, "MSI capable controller\n");
5975                 if (!pci_enable_msi(h->pdev))
5976                         h->msi_vector = 1;
5977                 else
5978                         dev_warn(&h->pdev->dev, "MSI init failed\n");
5979         }
5980 default_int_mode:
5981 #endif                          /* CONFIG_PCI_MSI */
5982         /* if we get here we're going to use the default interrupt mode */
5983         h->intr[h->intr_mode] = h->pdev->irq;
5984 }
5985
5986 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
5987 {
5988         int i;
5989         u32 subsystem_vendor_id, subsystem_device_id;
5990
5991         subsystem_vendor_id = pdev->subsystem_vendor;
5992         subsystem_device_id = pdev->subsystem_device;
5993         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
5994                     subsystem_vendor_id;
5995
5996         for (i = 0; i < ARRAY_SIZE(products); i++)
5997                 if (*board_id == products[i].board_id)
5998                         return i;
5999
6000         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6001                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6002                 !hpsa_allow_any) {
6003                 dev_warn(&pdev->dev, "unrecognized board ID: "
6004                         "0x%08x, ignoring.\n", *board_id);
6005                 return -ENODEV;
6006         }
6007         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6008 }
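/*
 * The composed board_id is (subsystem device ID << 16) | subsystem vendor
 * ID, e.g. subsystem 0x103C:0x3225 yields 0x3225103C, the P600 checked for
 * in hpsa_p600_dma_prefetch_quirk() below.
 */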
6009
6010 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6011                                     unsigned long *memory_bar)
6012 {
6013         int i;
6014
6015         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6016                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6017                         /* addressing mode bits already removed */
6018                         *memory_bar = pci_resource_start(pdev, i);
6019                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6020                                 *memory_bar);
6021                         return 0;
6022                 }
6023         dev_warn(&pdev->dev, "no memory BAR found\n");
6024         return -ENODEV;
6025 }
6026
6027 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6028                                      int wait_for_ready)
6029 {
6030         int i, iterations;
6031         u32 scratchpad;
6032         if (wait_for_ready)
6033                 iterations = HPSA_BOARD_READY_ITERATIONS;
6034         else
6035                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6036
6037         for (i = 0; i < iterations; i++) {
6038                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6039                 if (wait_for_ready) {
6040                         if (scratchpad == HPSA_FIRMWARE_READY)
6041                                 return 0;
6042                 } else {
6043                         if (scratchpad != HPSA_FIRMWARE_READY)
6044                                 return 0;
6045                 }
6046                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6047         }
6048         dev_warn(&pdev->dev, "board not ready, timed out.\n");
6049         return -ENODEV;
6050 }
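/*
 * Board state is polled via the scratchpad register: HPSA_FIRMWARE_READY
 * there means the firmware is up.  Depending on wait_for_ready the loop
 * above waits either for that value to appear (BOARD_READY) or to go away
 * (BOARD_NOT_READY, used to confirm that a reset actually took effect).
 */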
6051
6052 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6053                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6054                                u64 *cfg_offset)
6055 {
6056         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6057         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6058         *cfg_base_addr &= (u32) 0x0000ffff;
6059         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6060         if (*cfg_base_addr_index == -1) {
6061                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6062                 return -ENODEV;
6063         }
6064         return 0;
6065 }
6066
6067 static int hpsa_find_cfgtables(struct ctlr_info *h)
6068 {
6069         u64 cfg_offset;
6070         u32 cfg_base_addr;
6071         u64 cfg_base_addr_index;
6072         u32 trans_offset;
6073         int rc;
6074
6075         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6076                 &cfg_base_addr_index, &cfg_offset);
6077         if (rc)
6078                 return rc;
6079         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6080                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6081         if (!h->cfgtable) {
6082                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6083                 return -ENOMEM;
6084         }
6085         rc = write_driver_ver_to_cfgtable(h->cfgtable);
6086         if (rc)
6087                 return rc;
6088         /* Find performant mode table. */
6089         trans_offset = readl(&h->cfgtable->TransMethodOffset);
6090         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6091                                 cfg_base_addr_index)+cfg_offset+trans_offset,
6092                                 sizeof(*h->transtable));
6093         if (!h->transtable)
6094                 return -ENOMEM;
6095         return 0;
6096 }
6097
6098 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6099 {
6100         h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
6101
6102         /* Limit commands in memory limited kdump scenario. */
6103         if (reset_devices && h->max_commands > 32)
6104                 h->max_commands = 32;
6105
6106         if (h->max_commands < 16) {
6107                 dev_warn(&h->pdev->dev, "Controller reports "
6108                         "max supported commands of %d, an obvious lie. "
6109                         "Using 16.  Ensure that firmware is up to date.\n",
6110                         h->max_commands);
6111                 h->max_commands = 16;
6112         }
6113 }
6114
6115 /* If the controller reports that the total max sg entries is greater than 512,
6116  * then we know that chained SG blocks work.  (Original smart arrays did not
6117  * support chained SG blocks and would return zero for max sg entries.)
6118  */
6119 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6120 {
6121         return h->maxsgentries > 512;
6122 }
6123
6124 /* Interrogate the hardware for some limits:
6125  * max commands, max SG elements without chaining, and with chaining,
6126  * SG chain block size, etc.
6127  */
6128 static void hpsa_find_board_params(struct ctlr_info *h)
6129 {
6130         hpsa_get_max_perf_mode_cmds(h);
6131         h->nr_cmds = h->max_commands;
6132         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6133         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6134         if (hpsa_supports_chained_sg_blocks(h)) {
6135                 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
6136                 h->max_cmd_sg_entries = 32;
6137                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6138                 h->maxsgentries--; /* save one for chain pointer */
6139         } else {
6140                 /*
6141                  * Original smart arrays supported at most 31 s/g entries
6142                  * embedded inline in the command (trying to use more
6143                  * would lock up the controller)
6144                  */
6145                 h->max_cmd_sg_entries = 31;
6146                 h->maxsgentries = 31; /* default to traditional values */
6147                 h->chainsize = 0;
6148         }
6149
6150         /* Find out what task management functions are supported and cache */
6151         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6152         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6153                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6154         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6155                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
6156 }
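/*
 * Worked example for the SG budget above (numbers hypothetical): a
 * controller reporting 2048 total SG entries supports chaining, so each
 * command embeds at most 32 entries, the chain block holds the remaining
 * 2048 - 32 = 2016, and maxsgentries drops to 2047 because one embedded
 * slot is spent on the chain pointer.
 */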
6157
6158 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6159 {
6160         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6161                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
6162                 return false;
6163         }
6164         return true;
6165 }
6166
6167 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6168 {
6169         u32 driver_support;
6170
6171         driver_support = readl(&(h->cfgtable->driver_support));
6172         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6173 #ifdef CONFIG_X86
6174         driver_support |= ENABLE_SCSI_PREFETCH;
6175 #endif
6176         driver_support |= ENABLE_UNIT_ATTN;
6177         writel(driver_support, &(h->cfgtable->driver_support));
6178 }
6179
6180 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
6181  * in a prefetch beyond physical memory.
6182  */
6183 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6184 {
6185         u32 dma_prefetch;
6186
6187         if (h->board_id != 0x3225103C)
6188                 return;
6189         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6190         dma_prefetch |= 0x8000;
6191         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6192 }
6193
6194 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6195 {
6196         int i;
6197         u32 doorbell_value;
6198         unsigned long flags;
6199         /* wait until the clear_event_notify bit 6 is cleared by the controller. */
6200         for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
6201                 spin_lock_irqsave(&h->lock, flags);
6202                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6203                 spin_unlock_irqrestore(&h->lock, flags);
6204                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6205                         goto done;
6206                 /* delay and try again */
6207                 msleep(CLEAR_EVENT_WAIT_INTERVAL);
6208         }
6209         return -ENODEV;
6210 done:
6211         return 0;
6212 }
6213
6214 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6215 {
6216         int i;
6217         u32 doorbell_value;
6218         unsigned long flags;
6219
6220         /* under certain very rare conditions, this can take a while.
6221          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6222          * as we enter this code.)
6223          */
6224         for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
6225                 spin_lock_irqsave(&h->lock, flags);
6226                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6227                 spin_unlock_irqrestore(&h->lock, flags);
6228                 if (!(doorbell_value & CFGTBL_ChangeReq))
6229                         goto done;
6230                 /* delay and try again */
6231                 msleep(MODE_CHANGE_WAIT_INTERVAL);
6232         }
6233         return -ENODEV;
6234 done:
6235         return 0;
6236 }
6237
6238 /* return -ENODEV or other reason on error, 0 on success */
6239 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6240 {
6241         u32 trans_support;
6242
6243         trans_support = readl(&(h->cfgtable->TransportSupport));
6244         if (!(trans_support & SIMPLE_MODE))
6245                 return -ENOTSUPP;
6246
6247         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6248
6249         /* Update the field, and then ring the doorbell */
6250         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6251         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6252         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6253         if (hpsa_wait_for_mode_change_ack(h))
6254                 goto error;
6255         print_cfg_table(&h->pdev->dev, h->cfgtable);
6256         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6257                 goto error;
6258         h->transMethod = CFGTBL_Trans_Simple;
6259         return 0;
6260 error:
6261         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
6262         return -ENODEV;
6263 }
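/*
 * The handshake above is the usual cfgtable protocol: write the requested
 * transport method into HostWrite.TransportRequest, ring the CFGTBL_ChangeReq
 * doorbell, wait for the controller to acknowledge by clearing the doorbell
 * bit, then verify that TransportActive really changed.
 */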
6264
6265 static int hpsa_pci_init(struct ctlr_info *h)
6266 {
6267         int prod_index, err;
6268
6269         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6270         if (prod_index < 0)
6271                 return prod_index;
6272         h->product_name = products[prod_index].product_name;
6273         h->access = *(products[prod_index].access);
6274
6275         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6276                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6277
6278         err = pci_enable_device(h->pdev);
6279         if (err) {
6280                 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6281                 return err;
6282         }
6283
6284         err = pci_request_regions(h->pdev, HPSA);
6285         if (err) {
6286                 dev_err(&h->pdev->dev,
6287                         "cannot obtain PCI resources, aborting\n");
6288                 return err;
6289         }
6290
6291         pci_set_master(h->pdev);
6292
6293         hpsa_interrupt_mode(h);
6294         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6295         if (err)
6296                 goto err_out_free_res;
6297         h->vaddr = remap_pci_mem(h->paddr, 0x250);
6298         if (!h->vaddr) {
6299                 err = -ENOMEM;
6300                 goto err_out_free_res;
6301         }
6302         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6303         if (err)
6304                 goto err_out_free_res;
6305         err = hpsa_find_cfgtables(h);
6306         if (err)
6307                 goto err_out_free_res;
6308         hpsa_find_board_params(h);
6309
6310         if (!hpsa_CISS_signature_present(h)) {
6311                 err = -ENODEV;
6312                 goto err_out_free_res;
6313         }
6314         hpsa_set_driver_support_bits(h);
6315         hpsa_p600_dma_prefetch_quirk(h);
6316         err = hpsa_enter_simple_mode(h);
6317         if (err)
6318                 goto err_out_free_res;
6319         return 0;
6320
6321 err_out_free_res:
6322         if (h->transtable)
6323                 iounmap(h->transtable);
6324         if (h->cfgtable)
6325                 iounmap(h->cfgtable);
6326         if (h->vaddr)
6327                 iounmap(h->vaddr);
6328         pci_disable_device(h->pdev);
6329         pci_release_regions(h->pdev);
6330         return err;
6331 }
6332
6333 static void hpsa_hba_inquiry(struct ctlr_info *h)
6334 {
6335         int rc;
6336
6337 #define HBA_INQUIRY_BYTE_COUNT 64
6338         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6339         if (!h->hba_inquiry_data)
6340                 return;
6341         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6342                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6343         if (rc != 0) {
6344                 kfree(h->hba_inquiry_data);
6345                 h->hba_inquiry_data = NULL;
6346         }
6347 }
6348
6349 static int hpsa_init_reset_devices(struct pci_dev *pdev)
6350 {
6351         int rc, i;
6352         void __iomem *vaddr;
6353
6354         if (!reset_devices)
6355                 return 0;
6356
6357         /* kdump kernel is loading, we don't know what state the
6358          * pci interface is in.  dev->enable_cnt is equal to zero,
6359          * so we call enable+disable, wait a while, and switch it on.
6360          */
6361         rc = pci_enable_device(pdev);
6362         if (rc) {
6363                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6364                 return -ENODEV;
6365         }
6366         pci_disable_device(pdev);
6367         msleep(260);                    /* a randomly chosen number */
6368         rc = pci_enable_device(pdev);
6369         if (rc) {
6370                 dev_warn(&pdev->dev, "failed to enable device.\n");
6371                 return -ENODEV;
6372         }
6373
6374         pci_set_master(pdev);
6375
6376         vaddr = pci_ioremap_bar(pdev, 0);
6377         if (vaddr == NULL) {
6378                 rc = -ENOMEM;
6379                 goto out_disable;
6380         }
6381         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6382         iounmap(vaddr);
6383
6384         /* Reset the controller with a PCI power-cycle or via doorbell */
6385         rc = hpsa_kdump_hard_reset_controller(pdev);
6386
6387         /* -ENOTSUPP here means we cannot reset the controller
6388          * but it's already (and still) up and running in
6389          * "performant mode".  Or, it might be 640x, which can't reset
6390          * due to concerns about shared bbwc between 6402/6404 pair.
6391          */
6392         if (rc)
6393                 goto out_disable;
6394
6395         /* Now try to get the controller to respond to a no-op */
6396         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
6397         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6398                 if (hpsa_noop(pdev) == 0)
6399                         break;
6400                 else
6401                         dev_warn(&pdev->dev, "no-op failed%s\n",
6402                                         (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
6403         }
6404
6405 out_disable:
6407         pci_disable_device(pdev);
6408         return rc;
6409 }
6410
6411 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6412 {
6413         h->cmd_pool_bits = kzalloc(
6414                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6415                 sizeof(unsigned long), GFP_KERNEL);
6416         h->cmd_pool = pci_alloc_consistent(h->pdev,
6417                     h->nr_cmds * sizeof(*h->cmd_pool),
6418                     &(h->cmd_pool_dhandle));
6419         h->errinfo_pool = pci_alloc_consistent(h->pdev,
6420                     h->nr_cmds * sizeof(*h->errinfo_pool),
6421                     &(h->errinfo_pool_dhandle));
6422         if ((h->cmd_pool_bits == NULL)
6423             || (h->cmd_pool == NULL)
6424             || (h->errinfo_pool == NULL)) {
6425                 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
6426                 goto clean_up;
6427         }
6428         return 0;
6429 clean_up:
6430         hpsa_free_cmd_pool(h);
6431         return -ENOMEM;
6432 }
6433
6434 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6435 {
6436         kfree(h->cmd_pool_bits);
6437         if (h->cmd_pool)
6438                 pci_free_consistent(h->pdev,
6439                             h->nr_cmds * sizeof(struct CommandList),
6440                             h->cmd_pool, h->cmd_pool_dhandle);
6441         if (h->ioaccel2_cmd_pool)
6442                 pci_free_consistent(h->pdev,
6443                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6444                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6445         if (h->errinfo_pool)
6446                 pci_free_consistent(h->pdev,
6447                             h->nr_cmds * sizeof(struct ErrorInfo),
6448                             h->errinfo_pool,
6449                             h->errinfo_pool_dhandle);
6450         if (h->ioaccel_cmd_pool)
6451                 pci_free_consistent(h->pdev,
6452                         h->nr_cmds * sizeof(struct io_accel1_cmd),
6453                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6454 }
6455
6456 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6457 {
6458         int i, cpu;
6459
6460         cpu = cpumask_first(cpu_online_mask);
6461         for (i = 0; i < h->msix_vector; i++) {
6462                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6463                 cpu = cpumask_next(cpu, cpu_online_mask);
6464         }
6465 }
6466
6467 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
6468 static void hpsa_free_irqs(struct ctlr_info *h)
6469 {
6470         int i;
6471
6472         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6473                 /* Single reply queue, only one irq to free */
6474                 i = h->intr_mode;
6475                 irq_set_affinity_hint(h->intr[i], NULL);
6476                 free_irq(h->intr[i], &h->q[i]);
6477                 return;
6478         }
6479
6480         for (i = 0; i < h->msix_vector; i++) {
6481                 irq_set_affinity_hint(h->intr[i], NULL);
6482                 free_irq(h->intr[i], &h->q[i]);
6483         }
6484         for (; i < MAX_REPLY_QUEUES; i++)
6485                 h->q[i] = 0;
6486 }
6487
6488 /* returns 0 on success; cleans up and returns -Enn on error */
6489 static int hpsa_request_irqs(struct ctlr_info *h,
6490         irqreturn_t (*msixhandler)(int, void *),
6491         irqreturn_t (*intxhandler)(int, void *))
6492 {
6493         int rc, i;
6494
6495         /*
6496          * initialize h->q[x] = x so that interrupt handlers know which
6497          * queue to process.
6498          */
6499         for (i = 0; i < MAX_REPLY_QUEUES; i++)
6500                 h->q[i] = (u8) i;
6501
6502         if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6503                 /* If performant mode and MSI-X, use multiple reply queues */
6504                 for (i = 0; i < h->msix_vector; i++) {
6505                         rc = request_irq(h->intr[i], msixhandler,
6506                                         0, h->devname,
6507                                         &h->q[i]);
6508                         if (rc) {
6509                                 int j;
6510
6511                                 dev_err(&h->pdev->dev,
6512                                         "failed to get irq %d for %s\n",
6513                                        h->intr[i], h->devname);
6514                                 for (j = 0; j < i; j++) {
6515                                         free_irq(h->intr[j], &h->q[j]);
6516                                         h->q[j] = 0;
6517                                 }
6518                                 for (; j < MAX_REPLY_QUEUES; j++)
6519                                         h->q[j] = 0;
6520                                 return rc;
6521                         }
6522                 }
6523                 hpsa_irq_affinity_hints(h);
6524         } else {
6525                 /* Use single reply pool */
6526                 if (h->msix_vector > 0 || h->msi_vector) {
6527                         rc = request_irq(h->intr[h->intr_mode],
6528                                 msixhandler, 0, h->devname,
6529                                 &h->q[h->intr_mode]);
6530                 } else {
6531                         rc = request_irq(h->intr[h->intr_mode],
6532                                 intxhandler, IRQF_SHARED, h->devname,
6533                                 &h->q[h->intr_mode]);
6534                 }
6535         }
6536         if (rc) {
6537                 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6538                        h->intr[h->intr_mode], h->devname);
6539                 return -ENODEV;
6540         }
6541         return 0;
6542 }
6543
6544 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6545 {
6546         if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6547                 HPSA_RESET_TYPE_CONTROLLER)) {
6548                 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6549                 return -EIO;
6550         }
6551
6552         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6553         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6554                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6555                 return -1;
6556         }
6557
6558         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6559         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6560                 dev_warn(&h->pdev->dev, "Board failed to become ready "
6561                         "after soft reset.\n");
6562                 return -1;
6563         }
6564
6565         return 0;
6566 }
6567
6568 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6569 {
6570         hpsa_free_irqs(h);
6571 #ifdef CONFIG_PCI_MSI
6572         if (h->msix_vector) {
6573                 if (h->pdev->msix_enabled)
6574                         pci_disable_msix(h->pdev);
6575         } else if (h->msi_vector) {
6576                 if (h->pdev->msi_enabled)
6577                         pci_disable_msi(h->pdev);
6578         }
6579 #endif /* CONFIG_PCI_MSI */
6580 }
6581
6582 static void hpsa_free_reply_queues(struct ctlr_info *h)
6583 {
6584         int i;
6585
6586         for (i = 0; i < h->nreply_queues; i++) {
6587                 if (!h->reply_queue[i].head)
6588                         continue;
6589                 pci_free_consistent(h->pdev, h->reply_queue_size,
6590                         h->reply_queue[i].head, h->reply_queue[i].busaddr);
6591                 h->reply_queue[i].head = NULL;
6592                 h->reply_queue[i].busaddr = 0;
6593         }
6594 }
6595
6596 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6597 {
6598         hpsa_free_irqs_and_disable_msix(h);
6599         hpsa_free_sg_chain_blocks(h);
6600         hpsa_free_cmd_pool(h);
6601         kfree(h->ioaccel1_blockFetchTable);
6602         kfree(h->blockFetchTable);
6603         hpsa_free_reply_queues(h);
6604         if (h->vaddr)
6605                 iounmap(h->vaddr);
6606         if (h->transtable)
6607                 iounmap(h->transtable);
6608         if (h->cfgtable)
6609                 iounmap(h->cfgtable);
6610         pci_disable_device(h->pdev);
6611         pci_release_regions(h->pdev);
6612         kfree(h);
6613 }
6614
6615 /* Called when controller lockup detected. */
6616 static void fail_all_outstanding_cmds(struct ctlr_info *h)
6617 {
6618         int i, refcount;
6619         struct CommandList *c;
6620
6621         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
6622         for (i = 0; i < h->nr_cmds; i++) {
6623                 c = h->cmd_pool + i;
6624                 refcount = atomic_inc_return(&c->refcount);
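                /* refcount > 1 means the command was allocated and may be
                 * outstanding, so complete it with a hardware error status.
                 */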
6625                 if (refcount > 1) {
6626                         c->err_info->CommandStatus = CMD_HARDWARE_ERR;
6627                         finish_cmd(c);
6628                 }
6629                 cmd_free(h, c);
6630         }
6631 }
6632
6633 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6634 {
6635         int cpu;
6636
6637         for_each_online_cpu(cpu) {
6638                 u32 *lockup_detected;
6639                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6640                 *lockup_detected = value;
6641         }
6642         wmb(); /* be sure the per-cpu variables are out to memory */
6643 }
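
/*
 * For reference, a minimal sketch of the matching read side (the driver's
 * real lockup_detected() helper, used by the workers below, lives elsewhere
 * in this file): read the current CPU's copy of the flag with preemption
 * disabled, pairing with the per-cpu writes above.  Illustrative only; the
 * name example_read_lockup_flag is not part of the driver.
 */
static u32 __attribute__((unused)) example_read_lockup_flag(struct ctlr_info *h)
{
        u32 rc, *lockup_detected;
        int cpu;

        cpu = get_cpu();        /* disables preemption */
        lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
        rc = *lockup_detected;
        put_cpu();
        return rc;
}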
6644
6645 static void controller_lockup_detected(struct ctlr_info *h)
6646 {
6647         unsigned long flags;
6648         u32 lockup_detected;
6649
6650         h->access.set_intr_mask(h, HPSA_INTR_OFF);
6651         spin_lock_irqsave(&h->lock, flags);
6652         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6653         if (!lockup_detected) {
6654                 /* No heartbeat, but scratchpad reads zero; use a nonzero sentinel. */
6655                 dev_warn(&h->pdev->dev,
6656                         "lockup detected but scratchpad register is zero\n");
6657                 lockup_detected = 0xffffffff;
6658         }
6659         set_lockup_detected_for_all_cpus(h, lockup_detected);
6660         spin_unlock_irqrestore(&h->lock, flags);
6661         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6662                         lockup_detected);
6663         pci_disable_device(h->pdev);
6664         fail_all_outstanding_cmds(h);
6665 }
6666
6667 static void detect_controller_lockup(struct ctlr_info *h)
6668 {
6669         u64 now;
6670         u32 heartbeat;
6671         unsigned long flags;
6672
6673         now = get_jiffies_64();
6674         /* If we've received an interrupt recently, we're ok. */
6675         if (time_after64(h->last_intr_timestamp +
6676                                 (h->heartbeat_sample_interval), now))
6677                 return;
6678
6679         /*
6680          * If we've already checked the heartbeat recently, we're ok.
6681          * This could happen if someone sends us a signal. We
6682          * otherwise don't care about signals in this thread.
6683          */
6684         if (time_after64(h->last_heartbeat_timestamp +
6685                                 (h->heartbeat_sample_interval), now))
6686                 return;
6687
6688         /* If heartbeat has not changed since we last looked, we're not ok. */
6689         spin_lock_irqsave(&h->lock, flags);
6690         heartbeat = readl(&h->cfgtable->HeartBeat);
6691         spin_unlock_irqrestore(&h->lock, flags);
6692         if (h->last_heartbeat == heartbeat) {
6693                 controller_lockup_detected(h);
6694                 return;
6695         }
6696
6697         /* We're ok. */
6698         h->last_heartbeat = heartbeat;
6699         h->last_heartbeat_timestamp = now;
6700 }
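
/*
 * Timing note for detect_controller_lockup() above: assuming a heartbeat
 * sample interval on the order of 30 seconds, the HeartBeat register is
 * read at most once per interval, and only when no interrupt has arrived
 * within it, so a lockup is declared only after roughly one to two
 * intervals of silence with an unchanged heartbeat counter.
 */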
6701
6702 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6703 {
6704         int i;
6705         char *event_type;
6706
6707         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6708                 return;
6709
6710         /* Ask the controller to clear the events we're handling. */
6711         if ((h->transMethod & (CFGTBL_Trans_io_accel1
6712                         | CFGTBL_Trans_io_accel2)) &&
6713                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6714                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6715
6716                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6717                         event_type = "state change";
6718                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6719                         event_type = "configuration change";
6720                 /* Stop sending new RAID offload reqs via the IO accelerator */
6721                 scsi_block_requests(h->scsi_host);
6722                 for (i = 0; i < h->ndevices; i++)
6723                         h->dev[i]->offload_enabled = 0;
6724                 hpsa_drain_accel_commands(h);
6725                 /* Set 'accelerator path config change' bit */
6726                 dev_warn(&h->pdev->dev,
6727                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6728                         h->events, event_type);
6729                 writel(h->events, &(h->cfgtable->clear_event_notify));
6730                 /* Set the "clear event notify field update" bit 6 */
6731                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6732                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6733                 hpsa_wait_for_clear_event_notify_ack(h);
6734                 scsi_unblock_requests(h->scsi_host);
6735         } else {
6736                 /* Acknowledge controller notification events. */
6737                 writel(h->events, &(h->cfgtable->clear_event_notify));
6738                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6739                 hpsa_wait_for_clear_event_notify_ack(h);
6740 #if 0
6741                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6742                 hpsa_wait_for_mode_change_ack(h);
6743 #endif
6744         }
6745         return;
6746 }
6747
6748 /* Check a register on the controller to see if there are configuration
6749  * changes (added/changed/removed logical drives, etc.) which mean that
6750  * we should rescan the controller for devices.
6751  * Also check flag for driver-initiated rescan.
6752  */
6753 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6754 {
6755         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6756                 return 0;
6757
6758         h->events = readl(&(h->cfgtable->event_notify));
6759         return h->events & RESCAN_REQUIRED_EVENT_BITS;
6760 }
6761
6762 /*
6763  * Check if any of the offline devices have become ready
6764  */
6765 static int hpsa_offline_devices_ready(struct ctlr_info *h)
6766 {
6767         unsigned long flags;
6768         struct offline_device_entry *d;
6769         struct list_head *this, *tmp;
6770
6771         spin_lock_irqsave(&h->offline_device_lock, flags);
6772         list_for_each_safe(this, tmp, &h->offline_device_list) {
6773                 d = list_entry(this, struct offline_device_entry,
6774                                 offline_list);
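                /* Drop the lock while probing the volume below, since
                 * hpsa_volume_offline() issues commands to the controller
                 * and can sleep.
                 */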
6775                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6776                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
6777                         spin_lock_irqsave(&h->offline_device_lock, flags);
6778                         list_del(&d->offline_list);
6779                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
6780                         return 1;
6781                 }
6782                 spin_lock_irqsave(&h->offline_device_lock, flags);
6783         }
6784         spin_unlock_irqrestore(&h->offline_device_lock, flags);
6785         return 0;
6786 }
6787
6788 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
6789 {
6790         unsigned long flags;
6791         struct ctlr_info *h = container_of(to_delayed_work(work),
6792                                         struct ctlr_info, rescan_ctlr_work);
6793
6795         if (h->remove_in_progress)
6796                 return;
6797
6798         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6799                 scsi_host_get(h->scsi_host);
6800                 hpsa_ack_ctlr_events(h);
6801                 hpsa_scan_start(h->scsi_host);
6802                 scsi_host_put(h->scsi_host);
6803         }
6804         spin_lock_irqsave(&h->lock, flags);
6805         if (!h->remove_in_progress)
6806                 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
6807                                 h->heartbeat_sample_interval);
6808         spin_unlock_irqrestore(&h->lock, flags);
6809 }
6810
6811 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6812 {
6813         unsigned long flags;
6814         struct ctlr_info *h = container_of(to_delayed_work(work),
6815                                         struct ctlr_info, monitor_ctlr_work);
6816
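        /* If a lockup is detected we return below without rescheduling,
         * so monitoring stops once the controller is declared dead.
         */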
6817         detect_controller_lockup(h);
6818         if (lockup_detected(h))
6819                 return;
6820
6821         spin_lock_irqsave(&h->lock, flags);
6822         if (!h->remove_in_progress)
6823                 schedule_delayed_work(&h->monitor_ctlr_work,
6824                                 h->heartbeat_sample_interval);
6825         spin_unlock_irqrestore(&h->lock, flags);
6826 }
6827
6828 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
6829                                                 char *name)
6830 {
6831         struct workqueue_struct *wq = NULL;
6832
6833         wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
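        /* e.g. name "rescan" on controller 0 yields a workqueue named
         * "rescan_0_hpsa".
         */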
6834         if (!wq)
6835                 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
6836
6837         return wq;
6838 }
6839
6840 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6841 {
6842         int dac, rc;
6843         struct ctlr_info *h;
6844         int try_soft_reset = 0;
6845         unsigned long flags;
6846
6847         if (number_of_controllers == 0)
6848                 printk(KERN_INFO DRIVER_NAME "\n");
6849
6850         rc = hpsa_init_reset_devices(pdev);
6851         if (rc) {
6852                 if (rc != -ENOTSUPP)
6853                         return rc;
6854                 /* If the reset fails in a particular way (it has no way to do
6855                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
6856                  * a soft reset once we get the controller configured up to the
6857                  * point that it can accept a command.
6858                  */
6859                 try_soft_reset = 1;
6860                 rc = 0;
6861         }
6862
6863 reinit_after_soft_reset:
6864
6865         /* Command structures must be aligned on a 32-byte boundary because
6866          * the 5 lower bits of the address are used by the hardware and by
6867          * the driver.  See comments in hpsa.h for more info.
6868          */
6869         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
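        /* e.g. with 32-byte alignment, every command's bus address ends in
         * five zero bits, which is what makes those bits reusable.
         */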
6870         h = kzalloc(sizeof(*h), GFP_KERNEL);
6871         if (!h)
6872                 return -ENOMEM;
6873
6874         h->pdev = pdev;
6875         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
6876         INIT_LIST_HEAD(&h->offline_device_list);
6877         spin_lock_init(&h->lock);
6878         spin_lock_init(&h->offline_device_lock);
6879         spin_lock_init(&h->scan_lock);
6880         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
6881
6882         h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
6883         if (!h->rescan_ctlr_wq) {
6884                 rc = -ENOMEM;
6885                 goto clean1;
6886         }
6887
6888         h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
6889         if (!h->resubmit_wq) {
6890                 rc = -ENOMEM;
6891                 goto clean1;
6892         }
6893
6894         /* Allocate and clear per-cpu variable lockup_detected */
6895         h->lockup_detected = alloc_percpu(u32);
6896         if (!h->lockup_detected) {
6897                 rc = -ENOMEM;
6898                 goto clean1;
6899         }
6900         set_lockup_detected_for_all_cpus(h, 0);
6901
6902         rc = hpsa_pci_init(h);
6903         if (rc != 0)
6904                 goto clean1;
6905
6906         sprintf(h->devname, HPSA "%d", number_of_controllers);
6907         h->ctlr = number_of_controllers;
6908         number_of_controllers++;
6909
6910         /* configure PCI DMA stuff */
6911         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
6912         if (rc == 0) {
6913                 dac = 1;
6914         } else {
6915                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6916                 if (rc == 0) {
6917                         dac = 0;
6918                 } else {
6919                         dev_err(&pdev->dev, "no suitable DMA available\n");
6920                         goto clean1;
6921                 }
6922         }
6923
6924         /* make sure the board interrupts are off */
6925         h->access.set_intr_mask(h, HPSA_INTR_OFF);
6926
6927         if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
6928                 goto clean2;
6929         dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
6930                h->devname, pdev->device,
6931                h->intr[h->intr_mode], dac ? "" : " not");
6932         rc = hpsa_allocate_cmd_pool(h);
6933         if (rc)
6934                 goto clean2_and_free_irqs;
6935         if (hpsa_allocate_sg_chain_blocks(h))
6936                 goto clean4;
6937         init_waitqueue_head(&h->scan_wait_queue);
6938         h->scan_finished = 1; /* no scan currently in progress */
6939
6940         pci_set_drvdata(pdev, h);
6941         h->ndevices = 0;
6942         h->hba_mode_enabled = 0;
6943         h->scsi_host = NULL;
6944         spin_lock_init(&h->devlock);
6945         hpsa_put_ctlr_into_performant_mode(h);
6946
6947         /* At this point, the controller is ready to take commands.
6948          * Now, if reset_devices and the hard reset didn't work, try
6949          * the soft reset and see if that works.
6950          */
6951         if (try_soft_reset) {
6952
6953                 /* This is kind of gross.  We may or may not get a completion
6954                  * from the soft reset command, and if we do, then the value
6955                  * from the fifo may or may not be valid.  So, we wait 10 secs
6956                  * after the reset throwing away any completions we get during
6957                  * that time.  Unregister the interrupt handler and register
6958                  * fake ones to scoop up any residual completions.
6959                  */
6960                 spin_lock_irqsave(&h->lock, flags);
6961                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6962                 spin_unlock_irqrestore(&h->lock, flags);
6963                 hpsa_free_irqs(h);
6964                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
6965                                         hpsa_intx_discard_completions);
6966                 if (rc) {
6967                         dev_warn(&h->pdev->dev,
6968                                 "Failed to request_irq after soft reset.\n");
6969                         goto clean4;
6970                 }
6971
6972                 rc = hpsa_kdump_soft_reset(h);
6973                 if (rc)
6974                         /* Neither hard nor soft reset worked, we're hosed. */
6975                         goto clean4;
6976
6977                 dev_info(&h->pdev->dev, "Board READY.\n");
6978                 dev_info(&h->pdev->dev,
6979                         "Waiting for stale completions to drain.\n");
6980                 h->access.set_intr_mask(h, HPSA_INTR_ON);
6981                 msleep(10000);
6982                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6983
6984                 rc = controller_reset_failed(h->cfgtable);
6985                 if (rc)
6986                         dev_info(&h->pdev->dev,
6987                                 "Soft reset appears to have failed.\n");
6988
6989                 /* since the controller's reset, we have to go back and re-init
6990                  * everything.  Easiest to just forget what we've done and do it
6991                  * all over again.
6992                  */
6993                 hpsa_undo_allocations_after_kdump_soft_reset(h);
6994                 try_soft_reset = 0;
6995                 if (rc)
6996                         /* don't go to clean4, we already unallocated */
6997                         return -ENODEV;
6998
6999                 goto reinit_after_soft_reset;
7000         }
7001
7002         /* Enable Accelerated IO path at driver layer */
7003         h->acciopath_status = 1;
7004
7006         /* Turn the interrupts on so we can service requests */
7007         h->access.set_intr_mask(h, HPSA_INTR_ON);
7008
7009         hpsa_hba_inquiry(h);
7010         hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
7011
7012         /* Monitor the controller for firmware lockups */
7013         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7014         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7015         schedule_delayed_work(&h->monitor_ctlr_work,
7016                                 h->heartbeat_sample_interval);
7017         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
7018         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7019                                 h->heartbeat_sample_interval);
7020         return 0;
7021
7022 clean4:
7023         hpsa_free_sg_chain_blocks(h);
7024         hpsa_free_cmd_pool(h);
7025 clean2_and_free_irqs:
7026         hpsa_free_irqs(h);
7027 clean2:
7028 clean1:
7029         if (h->resubmit_wq)
7030                 destroy_workqueue(h->resubmit_wq);
7031         if (h->rescan_ctlr_wq)
7032                 destroy_workqueue(h->rescan_ctlr_wq);
7033         if (h->lockup_detected)
7034                 free_percpu(h->lockup_detected);
7035         kfree(h);
7036         return rc;
7037 }
7038
7039 static void hpsa_flush_cache(struct ctlr_info *h)
7040 {
7041         char *flush_buf;
7042         struct CommandList *c;
7043
7044         /* Don't bother trying to flush the cache if locked up */
7045         if (unlikely(lockup_detected(h)))
7046                 return;
7047         flush_buf = kzalloc(4, GFP_KERNEL);
7048         if (!flush_buf)
7049                 return;
7050
7051         c = cmd_alloc(h);
7052         if (!c) {
7053                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
7054                 goto out_of_memory;
7055         }
7056         if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7057                 RAID_CTLR_LUNID, TYPE_CMD))
7058                 goto out;
7059         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
7060         if (c->err_info->CommandStatus == 0)
7061                 goto done;
7062 out:
7063         dev_warn(&h->pdev->dev, "error flushing cache on controller\n");
7064 done:
7065         cmd_free(h, c);
7066 out_of_memory:
7067         kfree(flush_buf);
7068 }
7069
7070 static void hpsa_shutdown(struct pci_dev *pdev)
7071 {
7072         struct ctlr_info *h;
7073
7074         h = pci_get_drvdata(pdev);
7075         /* Turn board interrupts off and send the flush-cache command so
7076          * that all data in the battery-backed cache is written out to
7077          * the disks before the controller stops.
7078          */
7079         hpsa_flush_cache(h);
7080         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7081         hpsa_free_irqs_and_disable_msix(h);
7082 }
7083
7084 static void hpsa_free_device_info(struct ctlr_info *h)
7085 {
7086         int i;
7087
7088         for (i = 0; i < h->ndevices; i++)
7089                 kfree(h->dev[i]);
7090 }
7091
7092 static void hpsa_remove_one(struct pci_dev *pdev)
7093 {
7094         struct ctlr_info *h;
7095         unsigned long flags;
7096
7097         if (pci_get_drvdata(pdev) == NULL) {
7098                 dev_err(&pdev->dev, "unable to remove device\n");
7099                 return;
7100         }
7101         h = pci_get_drvdata(pdev);
7102
7103         /* Get rid of any controller monitoring work items */
7104         spin_lock_irqsave(&h->lock, flags);
7105         h->remove_in_progress = 1;
7106         spin_unlock_irqrestore(&h->lock, flags);
7107         cancel_delayed_work_sync(&h->monitor_ctlr_work);
7108         cancel_delayed_work_sync(&h->rescan_ctlr_work);
7109         destroy_workqueue(h->rescan_ctlr_wq);
7110         destroy_workqueue(h->resubmit_wq);
7111         hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
7112         hpsa_shutdown(pdev);
7113         iounmap(h->vaddr);
7114         iounmap(h->transtable);
7115         iounmap(h->cfgtable);
7116         hpsa_free_device_info(h);
7117         hpsa_free_sg_chain_blocks(h);
7118         pci_free_consistent(h->pdev,
7119                 h->nr_cmds * sizeof(struct CommandList),
7120                 h->cmd_pool, h->cmd_pool_dhandle);
7121         pci_free_consistent(h->pdev,
7122                 h->nr_cmds * sizeof(struct ErrorInfo),
7123                 h->errinfo_pool, h->errinfo_pool_dhandle);
7124         hpsa_free_reply_queues(h);
7125         kfree(h->cmd_pool_bits);
7126         kfree(h->blockFetchTable);
7127         kfree(h->ioaccel1_blockFetchTable);
7128         kfree(h->ioaccel2_blockFetchTable);
7129         kfree(h->hba_inquiry_data);
7130         pci_disable_device(pdev);
7131         pci_release_regions(pdev);
7132         free_percpu(h->lockup_detected);
7133         kfree(h);
7134 }
7135
7136 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7137         __attribute__((unused)) pm_message_t state)
7138 {
7139         return -ENOSYS;
7140 }
7141
7142 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7143 {
7144         return -ENOSYS;
7145 }
7146
7147 static struct pci_driver hpsa_pci_driver = {
7148         .name = HPSA,
7149         .probe = hpsa_init_one,
7150         .remove = hpsa_remove_one,
7151         .id_table = hpsa_pci_device_id,
7152         .shutdown = hpsa_shutdown,
7153         .suspend = hpsa_suspend,
7154         .resume = hpsa_resume,
7155 };
7156
7157 /* Fill in bucket_map[], given nsgs (the max number of
7158  * scatter gather elements supported) and bucket[],
7159  * which is an array of num_buckets integers.  The bucket[]
7160  * array contains num_buckets different DMA transfer sizes (in
7161  * 16-byte increments) which the controller uses to fetch
7162  * commands.  This function fills in bucket_map[], which
7163  * maps a given number of scatter gather elements to one of
7164  * the DMA transfer sizes.  The point of it is to allow the
7165  * controller to only do as much DMA as needed to fetch the
7166  * command, with the DMA transfer size encoded in the lower
7167  * bits of the command address.
7168  */
7169 static void  calc_bucket_map(int bucket[], int num_buckets,
7170         int nsgs, int min_blocks, u32 *bucket_map)
7171 {
7172         int i, j, b, size;
7173
7174         /* Note, bucket_map must have nsgs+1 entries. */
7175         for (i = 0; i <= nsgs; i++) {
7176                 /* Compute size of a command with i SG entries */
7177                 size = i + min_blocks;
7178                 b = num_buckets; /* Assume the biggest bucket */
7179                 /* Find the bucket that is just big enough */
7180                 for (j = 0; j < num_buckets; j++) {
7181                         if (bucket[j] >= size) {
7182                                 b = j;
7183                                 break;
7184                         }
7185                 }
7186                 /* for a command with i SG entries, use bucket b. */
7187                 bucket_map[i] = b;
7188         }
7189 }
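
/*
 * Worked example: with the performant-mode call below, bucket[] is
 * bft[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
 * min_blocks = 4, so a command carrying i = 3 SG entries needs
 * size = 3 + 4 = 7 sixteen-byte blocks; the smallest bucket that fits
 * is bft[2] = 8, hence bucket_map[3] = 2.
 */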
7190
7191 /* return -ENODEV or other reason on error, 0 on success */
7192 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7193 {
7194         int i;
7195         unsigned long register_value;
7196         unsigned long transMethod = CFGTBL_Trans_Performant |
7197                         (trans_support & CFGTBL_Trans_use_short_tags) |
7198                                 CFGTBL_Trans_enable_directed_msix |
7199                         (trans_support & (CFGTBL_Trans_io_accel1 |
7200                                 CFGTBL_Trans_io_accel2));
7201         struct access_method access = SA5_performant_access;
7202
7203         /* This is a bit complicated.  There are 8 registers on
7204          * the controller which we write to in order to tell it the 8
7205          * different sizes of command that may be issued.  It's a way of
7206          * reducing the DMA done to fetch each command.  Encoded into
7207          * each command's tag are 3 bits which communicate to the controller
7208          * which of the eight sizes that command fits within.  The size of
7209          * each command depends on how many scatter gather entries there are.
7210          * Each SG entry requires 16 bytes.  The eight registers are programmed
7211          * with the number of 16-byte blocks a command of that size requires.
7212          * The smallest command possible requires 5 such 16-byte blocks;
7213          * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7214          * blocks.  Note, this only extends to the SG entries contained
7215          * within the command block, and does not extend to chained blocks
7216          * of SG elements.   bft[] contains the eight values we write to
7217          * the registers.  They are not evenly distributed, but have more
7218          * sizes for small commands, and fewer sizes for larger commands.
7219          */
7220         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7221 #define MIN_IOACCEL2_BFT_ENTRY 5
7222 #define HPSA_IOACCEL2_HEADER_SZ 4
7223         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7224                         13, 14, 15, 16, 17, 18, 19,
7225                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7226         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7227         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7228         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7229                                  16 * MIN_IOACCEL2_BFT_ENTRY);
7230         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7231         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7232         /*  5 = 1 s/g entry or 4k
7233          *  6 = 2 s/g entry or 8k
7234          *  8 = 4 s/g entry or 16k
7235          * 10 = 6 s/g entry or 24k
7236          */
7237
7238         /* If the controller supports either ioaccel method then
7239          * we can also use the RAID stack submit path that does not
7240          * perform the superfluous readl() after each command submission.
7241          */
7242         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7243                 access = SA5_performant_access_no_read;
7244
7245         /* Controller spec: zero out this buffer. */
7246         for (i = 0; i < h->nreply_queues; i++)
7247                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7248
7249         bft[7] = SG_ENTRIES_IN_CMD + 4;
7250         calc_bucket_map(bft, ARRAY_SIZE(bft),
7251                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7252         for (i = 0; i < 8; i++)
7253                 writel(bft[i], &h->transtable->BlockFetch[i]);
7254
7255         /* size of controller ring buffer */
7256         writel(h->max_commands, &h->transtable->RepQSize);
7257         writel(h->nreply_queues, &h->transtable->RepQCount);
7258         writel(0, &h->transtable->RepQCtrAddrLow32);
7259         writel(0, &h->transtable->RepQCtrAddrHigh32);
7260
7261         for (i = 0; i < h->nreply_queues; i++) {
7262                 writel(0, &h->transtable->RepQAddr[i].upper);
7263                 writel(h->reply_queue[i].busaddr,
7264                         &h->transtable->RepQAddr[i].lower);
7265         }
7266
7267         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7268         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7269         /*
7270          * Enable outbound interrupt coalescing in accelerator mode.
7271          */
7272         if (trans_support & CFGTBL_Trans_io_accel1) {
7273                 access = SA5_ioaccel_mode1_access;
7274                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7275                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7276         } else if (trans_support & CFGTBL_Trans_io_accel2) {
7277                 access = SA5_ioaccel_mode2_access;
7278                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7279                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7280         }
7283         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7284         if (hpsa_wait_for_mode_change_ack(h)) {
7285                 dev_err(&h->pdev->dev,
7286                         "performant mode problem - doorbell timeout\n");
7287                 return -ENODEV;
7288         }
7289         register_value = readl(&(h->cfgtable->TransportActive));
7290         if (!(register_value & CFGTBL_Trans_Performant)) {
7291                 dev_err(&h->pdev->dev,
7292                         "performant mode problem - transport not active\n");
7293                 return -ENODEV;
7294         }
7295         /* Change the access methods to the performant access methods */
7296         h->access = access;
7297         h->transMethod = transMethod;
7298
7299         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7300                 (trans_support & CFGTBL_Trans_io_accel2)))
7301                 return 0;
7302
7303         if (trans_support & CFGTBL_Trans_io_accel1) {
7304                 /* Set up I/O accelerator mode */
7305                 for (i = 0; i < h->nreply_queues; i++) {
7306                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7307                         h->reply_queue[i].current_entry =
7308                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7309                 }
7310                 bft[7] = h->ioaccel_maxsg + 8;
7311                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7312                                 h->ioaccel1_blockFetchTable);
7313
7314                 /* initialize all reply queue entries to unused */
7315                 for (i = 0; i < h->nreply_queues; i++)
7316                         memset(h->reply_queue[i].head,
7317                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7318                                 h->reply_queue_size);
7319
7320                 /* set all the constant fields in the accelerator command
7321                  * frames once at init time to save CPU cycles later.
7322                  */
7323                 for (i = 0; i < h->nr_cmds; i++) {
7324                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7325
7326                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
7327                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
7328                                         (i * sizeof(struct ErrorInfo)));
7329                         cp->err_info_len = sizeof(struct ErrorInfo);
7330                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
7331                         cp->host_context_flags =
7332                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
7333                         cp->timeout_sec = 0;
7334                         cp->ReplyQueue = 0;
7335                         cp->tag =
7336                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
7337                         cp->host_addr =
7338                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
7339                                         (i * sizeof(struct io_accel1_cmd)));
7340                 }
7341         } else if (trans_support & CFGTBL_Trans_io_accel2) {
7342                 u64 cfg_offset, cfg_base_addr_index;
7343                 u32 bft2_offset, cfg_base_addr;
7344                 int rc;
7345
7346                 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7347                         &cfg_base_addr_index, &cfg_offset);
7348                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7349                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7350                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7351                                 4, h->ioaccel2_blockFetchTable);
7352                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7353                 BUILD_BUG_ON(offsetof(struct CfgTable,
7354                                 io_accel_request_size_offset) != 0xb8);
7355                 h->ioaccel2_bft2_regs =
7356                         remap_pci_mem(pci_resource_start(h->pdev,
7357                                         cfg_base_addr_index) +
7358                                         cfg_offset + bft2_offset,
7359                                         ARRAY_SIZE(bft2) *
7360                                         sizeof(*h->ioaccel2_bft2_regs));
7361                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7362                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7363         }
7364         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7365         if (hpsa_wait_for_mode_change_ack(h)) {
7366                 dev_err(&h->pdev->dev,
7367                         "performant mode problem - enabling ioaccel mode\n");
7368                 return -ENODEV;
7369         }
7370         return 0;
7371 }
7372
7373 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7374 {
7375         h->ioaccel_maxsg =
7376                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7377         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7378                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7379
7380         /* Command structures must be aligned on a 128-byte boundary
7381          * because the 7 lower bits of the address are used by the
7382          * hardware.
7383          */
7384         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7385                         IOACCEL1_COMMANDLIST_ALIGNMENT);
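        /* With each pool entry a multiple of 128 bytes in size, every
         * command's bus address has its low 7 bits clear, which is what
         * frees those bits for the hardware's use as described above.
         */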
7386         h->ioaccel_cmd_pool =
7387                 pci_alloc_consistent(h->pdev,
7388                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7389                         &(h->ioaccel_cmd_pool_dhandle));
7390
7391         h->ioaccel1_blockFetchTable =
7392                 kmalloc(((h->ioaccel_maxsg + 1) *
7393                                 sizeof(u32)), GFP_KERNEL);
7394
7395         if ((h->ioaccel_cmd_pool == NULL) ||
7396                 (h->ioaccel1_blockFetchTable == NULL))
7397                 goto clean_up;
7398
7399         memset(h->ioaccel_cmd_pool, 0,
7400                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7401         return 0;
7402
7403 clean_up:
7404         if (h->ioaccel_cmd_pool)
7405                 pci_free_consistent(h->pdev,
7406                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7407                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7408         kfree(h->ioaccel1_blockFetchTable);
7409         return 1;
7410 }
7411
7412 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7413 {
7414         /* Allocate ioaccel2 mode command blocks and block fetch table */
7415
7416         h->ioaccel_maxsg =
7417                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7418         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7419                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7420
7421         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7422                         IOACCEL2_COMMANDLIST_ALIGNMENT);
7423         h->ioaccel2_cmd_pool =
7424                 pci_alloc_consistent(h->pdev,
7425                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7426                         &(h->ioaccel2_cmd_pool_dhandle));
7427
7428         h->ioaccel2_blockFetchTable =
7429                 kmalloc(((h->ioaccel_maxsg + 1) *
7430                                 sizeof(u32)), GFP_KERNEL);
7431
7432         if ((h->ioaccel2_cmd_pool == NULL) ||
7433                 (h->ioaccel2_blockFetchTable == NULL))
7434                 goto clean_up;
7435
7436         memset(h->ioaccel2_cmd_pool, 0,
7437                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7438         return 0;
7439
7440 clean_up:
7441         if (h->ioaccel2_cmd_pool)
7442                 pci_free_consistent(h->pdev,
7443                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7444                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7445         kfree(h->ioaccel2_blockFetchTable);
7446         return 1;
7447 }
7448
7449 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7450 {
7451         u32 trans_support;
7452         unsigned long transMethod = CFGTBL_Trans_Performant |
7453                                         CFGTBL_Trans_use_short_tags;
7454         int i;
7455
7456         if (hpsa_simple_mode)
7457                 return;
7458
7459         trans_support = readl(&(h->cfgtable->TransportSupport));
7460         if (!(trans_support & PERFORMANT_MODE))
7461                 return;
7462
7463         /* Check for I/O accelerator mode support */
7464         if (trans_support & CFGTBL_Trans_io_accel1) {
7465                 transMethod |= CFGTBL_Trans_io_accel1 |
7466                                 CFGTBL_Trans_enable_directed_msix;
7467                 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7468                         goto clean_up;
7469         } else if (trans_support & CFGTBL_Trans_io_accel2) {
7470                 transMethod |= CFGTBL_Trans_io_accel2 |
7471                                 CFGTBL_Trans_enable_directed_msix;
7472                 if (ioaccel2_alloc_cmds_and_bft(h))
7473                         goto clean_up;
7474         }
7477
7478         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7479         hpsa_get_max_perf_mode_cmds(h);
7480         /* Performant mode ring buffer and supporting data structures */
7481         h->reply_queue_size = h->max_commands * sizeof(u64);
7482
7483         for (i = 0; i < h->nreply_queues; i++) {
7484                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7485                                                 h->reply_queue_size,
7486                                                 &(h->reply_queue[i].busaddr));
7487                 if (!h->reply_queue[i].head)
7488                         goto clean_up;
7489                 h->reply_queue[i].size = h->max_commands;
7490                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
7491                 h->reply_queue[i].current_entry = 0;
7492         }
7493
7494         /* Need a block fetch table for performant mode */
7495         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7496                                 sizeof(u32)), GFP_KERNEL);
7497         if (!h->blockFetchTable)
7498                 goto clean_up;
7499
7500         hpsa_enter_performant_mode(h, trans_support);
7501         return;
7502
7503 clean_up:
7504         hpsa_free_reply_queues(h);
7505         kfree(h->blockFetchTable);
7506 }
7507
7508 static int is_accelerated_cmd(struct CommandList *c)
7509 {
7510         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7511 }
7512
7513 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7514 {
7515         struct CommandList *c = NULL;
7516         int i, accel_cmds_out;
7517         int refcount;
7518
7519         do { /* wait for all outstanding ioaccel commands to drain out */
7520                 accel_cmds_out = 0;
7521                 for (i = 0; i < h->nr_cmds; i++) {
7522                         c = h->cmd_pool + i;
7523                         refcount = atomic_inc_return(&c->refcount);
7524                         if (refcount > 1) /* Command is allocated */
7525                                 accel_cmds_out += is_accelerated_cmd(c);
7526                         cmd_free(h, c);
7527                 }
7528                 if (accel_cmds_out <= 0)
7529                         break;
7530                 msleep(100);
7531         } while (1);
7532 }
7533
7534 /*
7535  *  This is it.  Register the PCI driver information for the cards we control;
7536  *  the OS will call our registered routines when it finds one of our cards.
7537  */
7538 static int __init hpsa_init(void)
7539 {
7540         return pci_register_driver(&hpsa_pci_driver);
7541 }
7542
7543 static void __exit hpsa_cleanup(void)
7544 {
7545         pci_unregister_driver(&hpsa_pci_driver);
7546 }
7547
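/* Compile-time layout checks: verify_offsets() is never called, but the
 * BUILD_BUG_ON()s inside it fail the build if any structure member drifts
 * from the offset the controller firmware interface expects.
 */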
7548 static void __attribute__((unused)) verify_offsets(void)
7549 {
7550 #define VERIFY_OFFSET(member, offset) \
7551         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
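        /* e.g. VERIFY_OFFSET(volume_blk_size, 4) below expands to a
         * compile-time assertion that
         * offsetof(struct raid_map_data, volume_blk_size) == 4.
         */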
7552
7553         VERIFY_OFFSET(structure_size, 0);
7554         VERIFY_OFFSET(volume_blk_size, 4);
7555         VERIFY_OFFSET(volume_blk_cnt, 8);
7556         VERIFY_OFFSET(phys_blk_shift, 16);
7557         VERIFY_OFFSET(parity_rotation_shift, 17);
7558         VERIFY_OFFSET(strip_size, 18);
7559         VERIFY_OFFSET(disk_starting_blk, 20);
7560         VERIFY_OFFSET(disk_blk_cnt, 28);
7561         VERIFY_OFFSET(data_disks_per_row, 36);
7562         VERIFY_OFFSET(metadata_disks_per_row, 38);
7563         VERIFY_OFFSET(row_cnt, 40);
7564         VERIFY_OFFSET(layout_map_count, 42);
7565         VERIFY_OFFSET(flags, 44);
7566         VERIFY_OFFSET(dekindex, 46);
7567         /* VERIFY_OFFSET(reserved, 48); */
7568         VERIFY_OFFSET(data, 64);
7569
7570 #undef VERIFY_OFFSET
7571
7572 #define VERIFY_OFFSET(member, offset) \
7573         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7574
7575         VERIFY_OFFSET(IU_type, 0);
7576         VERIFY_OFFSET(direction, 1);
7577         VERIFY_OFFSET(reply_queue, 2);
7578         /* VERIFY_OFFSET(reserved1, 3);  */
7579         VERIFY_OFFSET(scsi_nexus, 4);
7580         VERIFY_OFFSET(Tag, 8);
7581         VERIFY_OFFSET(cdb, 16);
7582         VERIFY_OFFSET(cciss_lun, 32);
7583         VERIFY_OFFSET(data_len, 40);
7584         VERIFY_OFFSET(cmd_priority_task_attr, 44);
7585         VERIFY_OFFSET(sg_count, 45);
7586         /* VERIFY_OFFSET(reserved3 */
7587         VERIFY_OFFSET(err_ptr, 48);
7588         VERIFY_OFFSET(err_len, 56);
7589         /* VERIFY_OFFSET(reserved4  */
7590         VERIFY_OFFSET(sg, 64);
7591
7592 #undef VERIFY_OFFSET
7593
7594 #define VERIFY_OFFSET(member, offset) \
7595         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7596
7597         VERIFY_OFFSET(dev_handle, 0x00);
7598         VERIFY_OFFSET(reserved1, 0x02);
7599         VERIFY_OFFSET(function, 0x03);
7600         VERIFY_OFFSET(reserved2, 0x04);
7601         VERIFY_OFFSET(err_info, 0x0C);
7602         VERIFY_OFFSET(reserved3, 0x10);
7603         VERIFY_OFFSET(err_info_len, 0x12);
7604         VERIFY_OFFSET(reserved4, 0x13);
7605         VERIFY_OFFSET(sgl_offset, 0x14);
7606         VERIFY_OFFSET(reserved5, 0x15);
7607         VERIFY_OFFSET(transfer_len, 0x1C);
7608         VERIFY_OFFSET(reserved6, 0x20);
7609         VERIFY_OFFSET(io_flags, 0x24);
7610         VERIFY_OFFSET(reserved7, 0x26);
7611         VERIFY_OFFSET(LUN, 0x34);
7612         VERIFY_OFFSET(control, 0x3C);
7613         VERIFY_OFFSET(CDB, 0x40);
7614         VERIFY_OFFSET(reserved8, 0x50);
7615         VERIFY_OFFSET(host_context_flags, 0x60);
7616         VERIFY_OFFSET(timeout_sec, 0x62);
7617         VERIFY_OFFSET(ReplyQueue, 0x64);
7618         VERIFY_OFFSET(reserved9, 0x65);
7619         VERIFY_OFFSET(tag, 0x68);
7620         VERIFY_OFFSET(host_addr, 0x70);
7621         VERIFY_OFFSET(CISS_LUN, 0x78);
7622         VERIFY_OFFSET(SG, 0x78 + 8);
7623 #undef VERIFY_OFFSET
7624 }
7625
7626 module_init(hpsa_init);
7627 module_exit(hpsa_cleanup);