/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
                "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");

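/* Example: loading with "modprobe hpsa hpsa_simple_mode=1" disables
 * performant mode.  Because the params are declared with S_IWUSR, both
 * are also exposed under /sys/module/hpsa/parameters/.
 */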
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
        {0x1921103C, "Smart Array P830i", &SA5_access},
        {0x1922103C, "Smart Array P430", &SA5_access},
        {0x1923103C, "Smart Array P431", &SA5_access},
        {0x1924103C, "Smart Array P830", &SA5_access},
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
        {0x21BD103C, "Smart Array", &SA5_access},
        {0x21BE103C, "Smart Array", &SA5_access},
        {0x21BF103C, "Smart Array", &SA5_access},
        {0x21C0103C, "Smart Array", &SA5_access},
        {0x21C1103C, "Smart Array", &SA5_access},
        {0x21C2103C, "Smart Array", &SA5_access},
        {0x21C3103C, "Smart Array", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array", &SA5_access},
        {0x21C6103C, "Smart Array", &SA5_access},
        {0x21C7103C, "Smart Array", &SA5_access},
        {0x21C8103C, "Smart Array", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
        {0x21CA103C, "Smart Array", &SA5_access},
        {0x21CB103C, "Smart Array", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
        {0x21CE103C, "Smart Array", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void lock_and_start_io(struct ctlr_info *h);
static void start_io(struct ctlr_info *h, unsigned long *flags);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
#define VPD_PAGE (1 << 8)
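/* Flag OR'd into fill_cmd()'s page_code argument to request a VPD
 * (EVPD) inquiry page rather than a standard inquiry.
 */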

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
        int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
                               u32 *cfg_base_addr, u64 *cfg_base_addr_index,
                               u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
                                    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
                                     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
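        /* err_info->SenseInfo holds fixed-format sense data: byte 2
         * carries the sense key, byte 12 the additional sense code (ASC).
         */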
        if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
                return 0;

        switch (c->err_info->SenseInfo[12]) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev, HPSA "%d: a state change "
                        "detected, command retried\n", h->ctlr);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
                        "detected, action required\n", h->ctlr);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
                        "changed, action required\n", h->ctlr);
                /*
                 * Note: this REPORT_LUNS_CHANGED condition only occurs on
                 * the external target (array) devices.
                 */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev, HPSA "%d: a power on "
                        "or device reset detected\n", h->ctlr);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
                    "cleared by another initiator\n", h->ctlr);
                break;
        default:
                dev_warn(&h->pdev->dev, HPSA "%d: unknown "
                        "unit attention detected\n", h->ctlr);
                break;
        }
        return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
                return 0;
        dev_warn(&h->pdev->dev, HPSA "device busy");
        return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int status, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &status) != 1)
                return -EINVAL;
        h = shost_to_hba(shost);
        h->acciopath_status = !!status;
        dev_warn(&h->pdev->dev,
                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
                h->acciopath_status ? "enabled" : "disabled");
        return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int debug_level, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
                return -EINVAL;
        if (debug_level < 0)
                debug_level = 0;
        h = shost_to_hba(shost);
        h->raid_offload_debug = debug_level;
        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
                h->raid_offload_debug);
        return count;
}

static ssize_t host_store_rescan(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
                (h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
                if (unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
                if (soft_unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

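/* Logical (as opposed to physical/peripheral) addressing mode is
 * indicated by the top two bits of scsi3addr byte 3 being 01b (0x40).
 */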
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
        "1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0     0
#define HPSA_RAID_4     1
#define HPSA_RAID_1     2       /* also used for RAID 10 */
#define HPSA_RAID_5     3       /* also used for RAID 50 */
#define HPSA_RAID_51    4
#define HPSA_RAID_6     5       /* also used for RAID 60 */
#define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int offload_enabled;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        offload_enabled = hdev->offload_enabled;
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
                        host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
                host_show_hp_ssd_smart_path_status,
                host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
                        host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_hp_ssd_smart_path_enabled,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        NULL,
};

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = HPSA,
        .proc_name              = HPSA,
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
        .max_sectors = 8192,
        .no_write_same = 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
        list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
        u32 a;
        struct reply_queue_buffer *rq = &h->reply_queue[q];
        unsigned long flags;

        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return h->access.command_completed(h, q);

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h, q);

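        /* The reply queue is a ring buffer: the parity bit in each
         * entry's low bit flips on every pass through the ring, so an
         * entry whose parity matches rq->wraparound is a fresh
         * completion, while a mismatch means the queue is empty.
         */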
        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;
                spin_lock_irqsave(&h->lock, flags);
                h->commands_outstanding--;
                spin_unlock_irqrestore(&h->lock, flags);
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
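
/*
 * For example, a normal performant-mode command using block fetch table
 * entry 3 is tagged as busaddr |= 1 | (3 << 1), i.e. low bits 0111b:
 * performant mode set, fetch entry 3, command type 0.
 */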

/* set_performant_mode: Modify the tag for cciss performant mode:
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number.
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (likely(h->msix_vector > 0))
                        c->Header.ReplyQueue =
                                raw_smp_processor_id() % h->nreply_queues;
        }
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c)
{
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
         *  - pull count (bits 1-3)
         *  - command type (bits 4-6)
         */
        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
                                        IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c)
{
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        cp->reply_queue = smp_processor_id() % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

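/* A firmware flash command is a BMIC WRITE whose CDB byte 6 selects the
 * flash-firmware subcommand.
 */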
static int is_firmware_flash_cmd(u8 *cdb)
{
        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
                struct CommandList *c)
{
        if (!is_firmware_flash_cmd(c->Request.CDB))
                return;
        atomic_inc(&h->firmware_flash_in_progress);
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
                struct CommandList *c)
{
        if (is_firmware_flash_cmd(c->Request.CDB) &&
                atomic_dec_and_test(&h->firmware_flash_in_progress))
                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
{
        unsigned long flags;

        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c);
                break;
        default:
                set_performant_mode(h, c);
        }
        dial_down_lockup_detection_during_fw_flash(h, c);
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
        start_io(h, &flags);
        spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
        if (WARN_ON(list_empty(&c->list)))
                return;
        list_del_init(&c->list);
}

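/* Returns nonzero if scsi3addr addresses the controller itself
 * (the RAID controller LUN ID) rather than an attached device.
 */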
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
                struct hpsa_scsi_dev_t *device,
                struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                /* differ only in byte 4? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;

        /* Initially (before registering with the scsi layer) we don't
         * know our hostno, and we don't want to print anything the first
         * time anyway (the scsi layer's inquiries will show that info).
         */
        /* if (hostno != -1) */
                dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
                        scsi_device_type(device->devtype), hostno,
                        device->bus, device->target, device->lun);
        return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;

        /* Raid offload parameters changed. */
        h->dev[entry]->offload_config = new_entry->offload_config;
        h->dev[entry]->offload_enabled = new_entry->offload_enabled;
        h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
        h->dev[entry]->raid_map = new_entry->raid_map;

        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
                scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
                sd->lun);
}

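/* Byte-wise equality test for two 8-byte SCSI-3 LUN addresses. */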
#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* Device attributes that can change, but don't mean
         * that the device is a different device, nor that the OS
         * needs to be told anything about the change.
         */
        if (dev1->raid_level != dev2->raid_level)
                return 1;
        if (dev1->offload_config != dev2->offload_config)
                return 1;
        if (dev1->offload_enabled != dev2->offload_enabled)
                return 1;
        return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
        int *index)
{
        int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
        for (i = 0; i < haystack_size; i++) {
                if (haystack[i] == NULL) /* previously removed. */
                        continue;
                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
                        *index = i;
                        if (device_is_the_same(needle, haystack[i])) {
                                if (device_updated(needle, haystack[i]))
                                        return DEVICE_UPDATED;
                                return DEVICE_SAME;
                        } else {
                                /* Keep offline devices offline */
                                if (needle->volume_offline)
                                        return DEVICE_NOT_FOUND;
                                return DEVICE_CHANGED;
                        }
                }
        }
        *index = -1;
        return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
                                        unsigned char scsi3addr[])
{
        struct offline_device_entry *device;
        unsigned long flags;

        /* Check to see if device is already on the list */
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_for_each_entry(device, &h->offline_device_list, offline_list) {
                if (memcmp(device->scsi3addr, scsi3addr,
                        sizeof(device->scsi3addr)) == 0) {
                        spin_unlock_irqrestore(&h->offline_device_lock, flags);
                        return;
                }
        }
        spin_unlock_irqrestore(&h->offline_device_lock, flags);

        /* Device is not on the list, add it. */
        device = kmalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
                return;
        }
        memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_add_tail(&device->offline_list, &h->offline_device_list);
        spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *sd)
{
        if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
        switch (sd->volume_offline) {
        case HPSA_LV_OK:
                break;
        case HPSA_LV_UNDERGOING_ERASE:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_UNDERGOING_RPI:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PENDING_RPI:
                dev_info(&h->pdev->dev,
                                "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
                                h->scsi_host->host_no,
                                sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_ENCRYPTED_NO_KEY:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_UNDERGOING_ENCRYPTION:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PENDING_ENCRYPTION:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        }
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *sd[], int nsds)
{
        /* sd contains scsi3 addresses and devtypes, and inquiry
         * data.  This function takes what's in sd to be the current
         * reality and updates h->dev[] to reflect that reality.
         */
        int i, entry, device_change, changes = 0;
        struct hpsa_scsi_dev_t *csd;
        unsigned long flags;
        struct hpsa_scsi_dev_t **added, **removed;
        int nadded, nremoved;
        struct Scsi_Host *sh = NULL;

        added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
        removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

        if (!added || !removed) {
                dev_warn(&h->pdev->dev, "out of memory in "
                        "adjust_hpsa_scsi_table\n");
                goto free_and_out;
        }

        spin_lock_irqsave(&h->devlock, flags);

        /* find any devices in h->dev[] that are not in
         * sd[] and remove them from h->dev[], and for any
         * devices which have changed, remove the old device
         * info and add the new device info.
         * If minor device attributes change, just update
         * the existing device structure.
         */
        i = 0;
        nremoved = 0;
        nadded = 0;
        while (i < h->ndevices) {
                csd = h->dev[i];
                device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        hpsa_scsi_remove_entry(h, hostno, i,
                                removed, &nremoved);
                        continue; /* remove ^^^, hence i not incremented */
                } else if (device_change == DEVICE_CHANGED) {
                        changes++;
                        hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
                                added, &nadded, removed, &nremoved);
                        /* Set it to NULL to prevent it from being freed
                         * at the bottom of hpsa_update_scsi_devices()
                         */
                        sd[entry] = NULL;
                } else if (device_change == DEVICE_UPDATED) {
                        hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
                }
                i++;
        }

        /* Now, make sure every device listed in sd[] is also
         * listed in h->dev[], adding them if they aren't found
         */

        for (i = 0; i < nsds; i++) {
                if (!sd[i]) /* if already added above. */
                        continue;

                /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
                 * as the SCSI mid-layer does not handle such devices well.
                 * It relentlessly loops sending TUR at 3Hz, then READ(10)
                 * at 160Hz, and prevents the system from coming up.
                 */
                if (sd[i]->volume_offline) {
                        hpsa_show_volume_status(h, sd[i]);
                        dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
                                h->scsi_host->host_no,
                                sd[i]->bus, sd[i]->target, sd[i]->lun);
                        continue;
                }

                device_change = hpsa_scsi_find_entry(sd[i], h->dev,
                                        h->ndevices, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        if (hpsa_scsi_add_entry(h, hostno, sd[i],
                                added, &nadded) != 0)
                                break;
                        sd[i] = NULL; /* prevent from being freed later. */
                } else if (device_change == DEVICE_CHANGED) {
                        /* should never happen... */
                        changes++;
                        dev_warn(&h->pdev->dev,
                                "device unexpectedly changed.\n");
                        /* but if it does happen, we just ignore that device */
                }
        }
        spin_unlock_irqrestore(&h->devlock, flags);

        /* Monitor devices which are in one of several NOT READY states to be
         * brought online later. This must be done without holding h->devlock,
         * so don't touch h->dev[]
         */
        for (i = 0; i < nsds; i++) {
                if (!sd[i]) /* if already added above. */
                        continue;
                if (sd[i]->volume_offline)
                        hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
        }

1372         /* Don't notify scsi mid layer of any changes the first time through
1373          * (or if there are no changes); scsi_scan_host will do it later the
1374          * first time through.
1375          */
1376         if (hostno == -1 || !changes)
1377                 goto free_and_out;
1378
1379         sh = h->scsi_host;
1380         /* Notify scsi mid layer of any removed devices */
1381         for (i = 0; i < nremoved; i++) {
1382                 struct scsi_device *sdev =
1383                         scsi_device_lookup(sh, removed[i]->bus,
1384                                 removed[i]->target, removed[i]->lun);
1385                 if (sdev != NULL) {
1386                         scsi_remove_device(sdev);
1387                         scsi_device_put(sdev);
1388                 } else {
1389                         /* We don't expect to get here.
1390                          * Future cmds to this device will get a selection
1391                          * timeout as if the device were gone.
1392                          */
1393                         dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d"
1394                                 " for removal.\n", hostno, removed[i]->bus,
1395                                 removed[i]->target, removed[i]->lun);
1396                 }
1397                 kfree(removed[i]);
1398                 removed[i] = NULL;
1399         }
1400
1401         /* Notify scsi mid layer of any added devices */
1402         for (i = 0; i < nadded; i++) {
1403                 if (scsi_add_device(sh, added[i]->bus,
1404                         added[i]->target, added[i]->lun) == 0)
1405                         continue;
1406                 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
1407                         "device not added.\n", hostno, added[i]->bus,
1408                         added[i]->target, added[i]->lun);
1409                 /* now we have to remove it from h->dev,
1410                  * since it didn't get added to scsi mid layer
1411                  */
1412                 fixup_botched_add(h, added[i]);
1413         }
1414
1415 free_and_out:
1416         kfree(added);
1417         kfree(removed);
1418 }
1419
1420 /*
1421  * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *
1422  * Assumes h->devlock is held.
1423  */
1424 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1425         int bus, int target, int lun)
1426 {
1427         int i;
1428         struct hpsa_scsi_dev_t *sd;
1429
1430         for (i = 0; i < h->ndevices; i++) {
1431                 sd = h->dev[i];
1432                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1433                         return sd;
1434         }
1435         return NULL;
1436 }
1437
1438 /* link sdev->hostdata to our per-device structure. */
1439 static int hpsa_slave_alloc(struct scsi_device *sdev)
1440 {
1441         struct hpsa_scsi_dev_t *sd;
1442         unsigned long flags;
1443         struct ctlr_info *h;
1444
1445         h = sdev_to_hba(sdev);
1446         spin_lock_irqsave(&h->devlock, flags);
1447         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1448                 sdev_id(sdev), sdev->lun);
1449         if (sd != NULL)
1450                 sdev->hostdata = sd;
1451         spin_unlock_irqrestore(&h->devlock, flags);
1452         return 0;
1453 }
1454
1455 static void hpsa_slave_destroy(struct scsi_device *sdev)
1456 {
1457         /* nothing to do. */
1458 }
1459
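/* Free the per-command SG chain blocks allocated by
 * hpsa_allocate_sg_chain_blocks() below.
 */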
1460 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1461 {
1462         int i;
1463
1464         if (!h->cmd_sg_list)
1465                 return;
1466         for (i = 0; i < h->nr_cmds; i++) {
1467                 kfree(h->cmd_sg_list[i]);
1468                 h->cmd_sg_list[i] = NULL;
1469         }
1470         kfree(h->cmd_sg_list);
1471         h->cmd_sg_list = NULL;
1472 }
1473
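/* Allocate one SG chain block per command.  Commands that need more
 * scatter-gather entries than fit in the command structure itself
 * spill the excess entries into these blocks; see
 * hpsa_map_sg_chain_block() below.
 */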
1474 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1475 {
1476         int i;
1477
1478         if (h->chainsize <= 0)
1479                 return 0;
1480
1481         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1482                                 GFP_KERNEL);
1483         if (!h->cmd_sg_list)
1484                 return -ENOMEM;
1485         for (i = 0; i < h->nr_cmds; i++) {
1486                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1487                                                 h->chainsize, GFP_KERNEL);
1488                 if (!h->cmd_sg_list[i])
1489                         goto clean;
1490         }
1491         return 0;
1492
1493 clean:
1494         hpsa_free_sg_chain_blocks(h);
1495         return -ENOMEM;
1496 }
1497
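/* Turn the command's last SG descriptor into a chain pointer: DMA-map
 * the command's chain block and store its bus address and length there
 * so the controller can fetch the remaining SG entries.
 */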
1498 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1499         struct CommandList *c)
1500 {
1501         struct SGDescriptor *chain_sg, *chain_block;
1502         u64 temp64;
1503
1504         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1505         chain_block = h->cmd_sg_list[c->cmdindex];
1506         chain_sg->Ext = HPSA_SG_CHAIN;
1507         chain_sg->Len = sizeof(*chain_sg) *
1508                 (c->Header.SGTotal - h->max_cmd_sg_entries);
1509         temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
1510                                 PCI_DMA_TODEVICE);
1511         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1512                 /* prevent subsequent unmapping */
1513                 chain_sg->Addr.lower = 0;
1514                 chain_sg->Addr.upper = 0;
1515                 return -1;
1516         }
1517         chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
1518         chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
1519         return 0;
1520 }
1521
1522 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1523         struct CommandList *c)
1524 {
1525         struct SGDescriptor *chain_sg;
1526         union u64bit temp64;
1527
1528         if (c->Header.SGTotal <= h->max_cmd_sg_entries)
1529                 return;
1530
1531         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1532         temp64.val32.lower = chain_sg->Addr.lower;
1533         temp64.val32.upper = chain_sg->Addr.upper;
1534         pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1535 }
1536
1537
1538 /* Decode the various types of errors on ioaccel2 path.
1539  * Return 1 for any error that should generate a RAID path retry.
1540  * Return 0 for errors that don't require a RAID path retry.
1541  */
1542 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1543                                         struct CommandList *c,
1544                                         struct scsi_cmnd *cmd,
1545                                         struct io_accel2_cmd *c2)
1546 {
1547         int data_len;
1548         int retry = 0;
1549
1550         switch (c2->error_data.serv_response) {
1551         case IOACCEL2_SERV_RESPONSE_COMPLETE:
1552                 switch (c2->error_data.status) {
1553                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1554                         break;
1555                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1556                         dev_warn(&h->pdev->dev,
1557                                 "%s: task complete with check condition.\n",
1558                                 "HP SSD Smart Path");
1559                         cmd->result |= SAM_STAT_CHECK_CONDITION;
1560                         if (c2->error_data.data_present !=
1561                                         IOACCEL2_SENSE_DATA_PRESENT) {
1562                                 memset(cmd->sense_buffer, 0,
1563                                         SCSI_SENSE_BUFFERSIZE);
1564                                 break;
1565                         }
1566                         /* copy the sense data */
1567                         data_len = c2->error_data.sense_data_len;
1568                         if (data_len > SCSI_SENSE_BUFFERSIZE)
1569                                 data_len = SCSI_SENSE_BUFFERSIZE;
1570                         if (data_len > sizeof(c2->error_data.sense_data_buff))
1571                                 data_len =
1572                                         sizeof(c2->error_data.sense_data_buff);
1573                         memcpy(cmd->sense_buffer,
1574                                 c2->error_data.sense_data_buff, data_len);
1575                         retry = 1;
1576                         break;
1577                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1578                         dev_warn(&h->pdev->dev,
1579                                 "%s: task complete with BUSY status.\n",
1580                                 "HP SSD Smart Path");
1581                         retry = 1;
1582                         break;
1583                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1584                         dev_warn(&h->pdev->dev,
1585                                 "%s: task complete with reservation conflict.\n",
1586                                 "HP SSD Smart Path");
1587                         retry = 1;
1588                         break;
1589                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1590                         /* Make scsi midlayer do unlimited retries */
1591                         cmd->result = DID_IMM_RETRY << 16;
1592                         break;
1593                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1594                         dev_warn(&h->pdev->dev,
1595                                 "%s: task complete with aborted status.\n",
1596                                 "HP SSD Smart Path");
1597                         retry = 1;
1598                         break;
1599                 default:
1600                         dev_warn(&h->pdev->dev,
1601                                 "%s: task complete with unrecognized status: 0x%02x\n",
1602                                 "HP SSD Smart Path", c2->error_data.status);
1603                         retry = 1;
1604                         break;
1605                 }
1606                 break;
1607         case IOACCEL2_SERV_RESPONSE_FAILURE:
1608                 /* don't expect to get here. */
1609                 dev_warn(&h->pdev->dev,
1610                         "unexpected delivery or target failure, status = 0x%02x\n",
1611                         c2->error_data.status);
1612                 retry = 1;
1613                 break;
1614         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1615                 break;
1616         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1617                 break;
1618         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1619                 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1620                 retry = 1;
1621                 break;
1622         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1623                 dev_warn(&h->pdev->dev, "task management function: invalid LUN\n");
1624                 break;
1625         default:
1626                 dev_warn(&h->pdev->dev,
1627                         "%s: Unrecognized server response: 0x%02x\n",
1628                         "HP SSD Smart Path",
1629                         c2->error_data.serv_response);
1630                 retry = 1;
1631                 break;
1632         }
1633
1634         return retry;   /* retry on raid path? */
1635 }
1636
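/* Completion handling for the ioaccel2 path: complete good status
 * immediately; on an offload error, disable HP SSD Smart Path for the
 * device, schedule a rescan, and force a retry on the standard RAID
 * path.
 */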
1637 static void process_ioaccel2_completion(struct ctlr_info *h,
1638                 struct CommandList *c, struct scsi_cmnd *cmd,
1639                 struct hpsa_scsi_dev_t *dev)
1640 {
1641         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1642         int raid_retry = 0;
1643
1644         /* check for good status */
1645         if (likely(c2->error_data.serv_response == 0 &&
1646                         c2->error_data.status == 0)) {
1647                 cmd_free(h, c);
1648                 cmd->scsi_done(cmd);
1649                 return;
1650         }
1651
1652         /* Any RAID offload error results in retry which will use
1653          * the normal I/O path so the controller can handle whatever's
1654          * wrong.
1655          */
1656         if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1657                 c2->error_data.serv_response ==
1658                         IOACCEL2_SERV_RESPONSE_FAILURE) {
1659                 dev->offload_enabled = 0;
1660                 h->drv_req_rescan = 1;  /* schedule controller for a rescan */
1661                 cmd->result = DID_SOFT_ERROR << 16;
1662                 cmd_free(h, c);
1663                 cmd->scsi_done(cmd);
1664                 return;
1665         }
1666         raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1667         /* If error found, disable Smart Path, schedule a rescan,
1668          * and force a retry on the standard path.
1669          */
1670         if (raid_retry) {
1671                 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1672                         "HP SSD Smart Path");
1673                 dev->offload_enabled = 0; /* Disable Smart Path */
1674                 h->drv_req_rescan = 1;    /* schedule controller rescan */
1675                 cmd->result = DID_SOFT_ERROR << 16;
1676         }
1677         cmd_free(h, c);
1678         cmd->scsi_done(cmd);
1679 }
1680
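/* Main completion routine for SCSI commands: undo the DMA mappings,
 * then translate the controller's error info (command status, SCSI
 * status, sense data) into a midlayer result and complete the command.
 */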
1681 static void complete_scsi_command(struct CommandList *cp)
1682 {
1683         struct scsi_cmnd *cmd;
1684         struct ctlr_info *h;
1685         struct ErrorInfo *ei;
1686         struct hpsa_scsi_dev_t *dev;
1687
1688         unsigned char sense_key;
1689         unsigned char asc;      /* additional sense code */
1690         unsigned char ascq;     /* additional sense code qualifier */
1691         unsigned long sense_data_size;
1692
1693         ei = cp->err_info;
1694         cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1695         h = cp->h;
1696         dev = cmd->device->hostdata;
1697
1698         scsi_dma_unmap(cmd); /* undo the DMA mappings */
1699         if ((cp->cmd_type == CMD_SCSI) &&
1700                 (cp->Header.SGTotal > h->max_cmd_sg_entries))
1701                 hpsa_unmap_sg_chain_block(h, cp);
1702
1703         cmd->result = (DID_OK << 16);           /* host byte */
1704         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1705
1706         if (cp->cmd_type == CMD_IOACCEL2)
1707                 return process_ioaccel2_completion(h, cp, cmd, dev);
1708
1709         cmd->result |= ei->ScsiStatus;
1710
1711         /* copy the sense data whether we need to or not. */
1712         if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1713                 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1714         else
1715                 sense_data_size = sizeof(ei->SenseInfo);
1716         if (ei->SenseLen < sense_data_size)
1717                 sense_data_size = ei->SenseLen;
1718
1719         memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1720         scsi_set_resid(cmd, ei->ResidualCnt);
1721
1722         if (ei->CommandStatus == 0) {
1723                 cmd_free(h, cp);
1724                 cmd->scsi_done(cmd);
1725                 return;
1726         }
1727
1728         /* For I/O accelerator commands, copy over some fields to the normal
1729          * CISS header used below for error handling.
1730          */
1731         if (cp->cmd_type == CMD_IOACCEL1) {
1732                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1733                 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1734                 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1735                 cp->Header.Tag.lower = c->Tag.lower;
1736                 cp->Header.Tag.upper = c->Tag.upper;
1737                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1738                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1739
1740                 /* Any RAID offload error results in retry which will use
1741                  * the normal I/O path so the controller can handle whatever's
1742                  * wrong.
1743                  */
1744                 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1745                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1746                                 dev->offload_enabled = 0;
1747                         cmd->result = DID_SOFT_ERROR << 16;
1748                         cmd_free(h, cp);
1749                         cmd->scsi_done(cmd);
1750                         return;
1751                 }
1752         }
1753
1754         /* an error has occurred */
1755         switch (ei->CommandStatus) {
1756
1757         case CMD_TARGET_STATUS:
1758                 if (ei->ScsiStatus) {
1759                         /* Get sense key */
1760                         sense_key = 0xf & ei->SenseInfo[2];
1761                         /* Get additional sense code */
1762                         asc = ei->SenseInfo[12];
1763                         /* Get additional sense code qualifier */
1764                         ascq = ei->SenseInfo[13];
1765                 }
1766
1767                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1768                         if (check_for_unit_attention(h, cp))
1769                                 break;
1770                         if (sense_key == ILLEGAL_REQUEST) {
1771                                 /*
1772                                  * SCSI REPORT_LUNS is commonly unsupported on
1773                                  * Smart Array.  Suppress noisy complaint.
1774                                  */
1775                                 if (cp->Request.CDB[0] == REPORT_LUNS)
1776                                         break;
1777
1778                                 /* If ASC/ASCQ indicate Logical Unit
1779                                  * Not Supported condition, just log it and bail.
1780                                  */
1781                                 if ((asc == 0x25) && (ascq == 0x0)) {
1782                                         dev_warn(&h->pdev->dev, "cp %p "
1783                                                 "has check condition\n", cp);
1784                                         break;
1785                                 }
1786                         }
1787
1788                         if (sense_key == NOT_READY) {
1789                                 /* If Sense is Not Ready, Logical Unit
1790                                  * Not ready, Manual Intervention
1791                                  * required
1792                                  */
1793                                 if ((asc == 0x04) && (ascq == 0x03)) {
1794                                         dev_warn(&h->pdev->dev, "cp %p "
1795                                                 "has check condition: unit "
1796                                                 "not ready, manual "
1797                                                 "intervention required\n", cp);
1798                                         break;
1799                                 }
1800                         }
1801                         if (sense_key == ABORTED_COMMAND) {
1802                                 /* Aborted command is retryable */
1803                                 dev_warn(&h->pdev->dev, "cp %p "
1804                                         "has check condition: aborted command: "
1805                                         "ASC: 0x%x, ASCQ: 0x%x\n",
1806                                         cp, asc, ascq);
1807                                 cmd->result |= DID_SOFT_ERROR << 16;
1808                                 break;
1809                         }
1810                         /* Must be some other type of check condition */
1811                         dev_dbg(&h->pdev->dev, "cp %p has check condition: "
1812                                         "unknown type: "
1813                                         "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1814                                         "Returning result: 0x%x, "
1815                                         "cmd=[%02x %02x %02x %02x %02x "
1816                                         "%02x %02x %02x %02x %02x %02x "
1817                                         "%02x %02x %02x %02x %02x]\n",
1818                                         cp, sense_key, asc, ascq,
1819                                         cmd->result,
1820                                         cmd->cmnd[0], cmd->cmnd[1],
1821                                         cmd->cmnd[2], cmd->cmnd[3],
1822                                         cmd->cmnd[4], cmd->cmnd[5],
1823                                         cmd->cmnd[6], cmd->cmnd[7],
1824                                         cmd->cmnd[8], cmd->cmnd[9],
1825                                         cmd->cmnd[10], cmd->cmnd[11],
1826                                         cmd->cmnd[12], cmd->cmnd[13],
1827                                         cmd->cmnd[14], cmd->cmnd[15]);
1828                         break;
1829                 }
1830
1831
1832                 /* Problem was not a check condition
1833                  * Pass it up to the upper layers...
1834                  */
1835                 if (ei->ScsiStatus) {
1836                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1837                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1838                                 "Returning result: 0x%x\n",
1839                                 cp, ei->ScsiStatus,
1840                                 sense_key, asc, ascq,
1841                                 cmd->result);
1842                 } else {  /* scsi status is zero??? How??? */
1843                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1844                                 "Returning no connection.\n", cp);
1845
1846                         /* Ordinarily, this case should never happen,
1847                          * but there is a bug in some released firmware
1848                          * revisions that allows it to happen if, for
1849                          * example, a 4100 backplane loses power and
1850                          * the tape drive is in it.  We assume that
1851                          * it's a fatal error of some kind because we
1852                          * can't show that it wasn't. We will make it
1853                          * look like selection timeout since that is
1854                          * the most common reason for this to occur,
1855                          * and it's severe enough.
1856                          */
1857
1858                         cmd->result = DID_NO_CONNECT << 16;
1859                 }
1860                 break;
1861
1862         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1863                 break;
1864         case CMD_DATA_OVERRUN:
1865                 dev_warn(&h->pdev->dev,
1866                         "cp %p has completed with data overrun reported\n",
1867                         cp);
1868                 break;
1869         case CMD_INVALID: {
1870                 /* print_bytes(cp, sizeof(*cp), 1, 0);
1871                 print_cmd(cp); */
1872                 /* We get CMD_INVALID if you address a non-existent device
1873                  * instead of a selection timeout (no response).  You will
1874                  * see this if you yank out a drive, then try to access it.
1875                  * This is kind of a shame because it means that any other
1876                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
1877                  * missing target. */
1878                 cmd->result = DID_NO_CONNECT << 16;
1879         }
1880                 break;
1881         case CMD_PROTOCOL_ERR:
1882                 cmd->result = DID_ERROR << 16;
1883                 dev_warn(&h->pdev->dev,
1884                         "cp %p has a protocol error\n", cp);
1885                 break;
1886         case CMD_HARDWARE_ERR:
1887                 cmd->result = DID_ERROR << 16;
1888                 dev_warn(&h->pdev->dev, "cp %p had a hardware error\n", cp);
1889                 break;
1890         case CMD_CONNECTION_LOST:
1891                 cmd->result = DID_ERROR << 16;
1892                 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1893                 break;
1894         case CMD_ABORTED:
1895                 cmd->result = DID_ABORT << 16;
1896                 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1897                                 cp, ei->ScsiStatus);
1898                 break;
1899         case CMD_ABORT_FAILED:
1900                 cmd->result = DID_ERROR << 16;
1901                 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1902                 break;
1903         case CMD_UNSOLICITED_ABORT:
1904                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1905                 dev_warn(&h->pdev->dev,
1906                         "cp %p aborted due to an unsolicited abort\n", cp);
1907                 break;
1908         case CMD_TIMEOUT:
1909                 cmd->result = DID_TIME_OUT << 16;
1910                 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1911                 break;
1912         case CMD_UNABORTABLE:
1913                 cmd->result = DID_ERROR << 16;
1914                 dev_warn(&h->pdev->dev, "Command unabortable\n");
1915                 break;
1916         case CMD_IOACCEL_DISABLED:
1917                 /* This only handles the direct pass-through case since RAID
1918                  * offload is handled above.  Just attempt a retry.
1919                  */
1920                 cmd->result = DID_SOFT_ERROR << 16;
1921                 dev_warn(&h->pdev->dev,
1922                                 "cp %p had HP SSD Smart Path error\n", cp);
1923                 break;
1924         default:
1925                 cmd->result = DID_ERROR << 16;
1926                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1927                                 cp, ei->CommandStatus);
1928         }
1929         cmd_free(h, cp);
1930         cmd->scsi_done(cmd);
1931 }
1932
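/* Undo the DMA mappings of the first sg_used SG entries of a command;
 * the inverse of hpsa_map_one() below.
 */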
1933 static void hpsa_pci_unmap(struct pci_dev *pdev,
1934         struct CommandList *c, int sg_used, int data_direction)
1935 {
1936         int i;
1937         union u64bit addr64;
1938
1939         for (i = 0; i < sg_used; i++) {
1940                 addr64.val32.lower = c->SG[i].Addr.lower;
1941                 addr64.val32.upper = c->SG[i].Addr.upper;
1942                 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1943                         data_direction);
1944         }
1945 }
1946
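/* DMA-map a single contiguous buffer into SG entry 0 of a command and
 * mark it as the last (unchained) entry.  On mapping failure, zero the
 * SG counts so a later unmap is a no-op, and return -1.
 */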
1947 static int hpsa_map_one(struct pci_dev *pdev,
1948                 struct CommandList *cp,
1949                 unsigned char *buf,
1950                 size_t buflen,
1951                 int data_direction)
1952 {
1953         u64 addr64;
1954
1955         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1956                 cp->Header.SGList = 0;
1957                 cp->Header.SGTotal = 0;
1958                 return 0;
1959         }
1960
1961         addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
1962         if (dma_mapping_error(&pdev->dev, addr64)) {
1963                 /* Prevent subsequent unmap of something never mapped */
1964                 cp->Header.SGList = 0;
1965                 cp->Header.SGTotal = 0;
1966                 return -1;
1967         }
1968         cp->SG[0].Addr.lower =
1969           (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
1970         cp->SG[0].Addr.upper =
1971           (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
1972         cp->SG[0].Len = buflen;
1973         cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
1974         cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
1975         cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
1976         return 0;
1977 }
1978
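/* Submit a command to the controller and sleep until the completion
 * path signals c->waiting.
 */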
1979 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1980         struct CommandList *c)
1981 {
1982         DECLARE_COMPLETION_ONSTACK(wait);
1983
1984         c->waiting = &wait;
1985         enqueue_cmd_and_start_io(h, c);
1986         wait_for_completion(&wait);
1987 }
1988
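/* Read this CPU's copy of the controller's lockup-detected flag,
 * which is set elsewhere by the driver's lockup detector.
 */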
1989 static u32 lockup_detected(struct ctlr_info *h)
1990 {
1991         int cpu;
1992         u32 rc, *lockup_detected;
1993
1994         cpu = get_cpu();
1995         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1996         rc = *lockup_detected;
1997         put_cpu();
1998         return rc;
1999 }
2000
2001 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2002         struct CommandList *c)
2003 {
2004         /* If controller lockup detected, fake a hardware error. */
2005         if (unlikely(lockup_detected(h)))
2006                 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2007         else
2008                 hpsa_scsi_do_simple_cmd_core(h, c);
2009 }
2010
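/* Issue a command, retrying with exponential backoff for as long as it
 * keeps failing with unit attention or busy status (up to
 * MAX_DRIVER_CMD_RETRIES attempts), then unmap its data buffer.
 */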
2011 #define MAX_DRIVER_CMD_RETRIES 25
2012 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2013         struct CommandList *c, int data_direction)
2014 {
2015         int backoff_time = 10, retry_count = 0;
2016
2017         do {
2018                 memset(c->err_info, 0, sizeof(*c->err_info));
2019                 hpsa_scsi_do_simple_cmd_core(h, c);
2020                 retry_count++;
2021                 if (retry_count > 3) {
2022                         msleep(backoff_time);
2023                         if (backoff_time < 1000)
2024                                 backoff_time *= 2;
2025                 }
2026         } while ((check_for_unit_attention(h, c) ||
2027                         check_for_busy(h, c)) &&
2028                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2029         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2030 }
2031
2032 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2033                                 struct CommandList *c)
2034 {
2035         const u8 *cdb = c->Request.CDB;
2036         const u8 *lun = c->Header.LUN.LunAddrBytes;
2037
2038         dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2039         " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2040                 txt, lun[0], lun[1], lun[2], lun[3],
2041                 lun[4], lun[5], lun[6], lun[7],
2042                 cdb[0], cdb[1], cdb[2], cdb[3],
2043                 cdb[4], cdb[5], cdb[6], cdb[7],
2044                 cdb[8], cdb[9], cdb[10], cdb[11],
2045                 cdb[12], cdb[13], cdb[14], cdb[15]);
2046 }
2047
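/* Log a human-readable description of a failed internal command,
 * including the sense data for check conditions.
 */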
2048 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2049                         struct CommandList *cp)
2050 {
2051         const struct ErrorInfo *ei = cp->err_info;
2052         struct device *d = &cp->h->pdev->dev;
2053         const u8 *sd = ei->SenseInfo;
2054
2055         switch (ei->CommandStatus) {
2056         case CMD_TARGET_STATUS:
2057                 hpsa_print_cmd(h, "SCSI status", cp);
2058                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2059                         dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2060                                 sd[2] & 0x0f, sd[12], sd[13]);
2061                 else
2062                         dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2063                 if (ei->ScsiStatus == 0)
2064                         dev_warn(d, "SCSI status is abnormally zero.  "
2065                                 "(probably indicates selection timeout "
2066                                 "reported incorrectly due to a known "
2067                                 "firmware bug, circa July, 2001.)\n");
2068                 break;
2069         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2070                 break;
2071         case CMD_DATA_OVERRUN:
2072                 hpsa_print_cmd(h, "overrun condition", cp);
2073                 break;
2074         case CMD_INVALID: {
2075                 /* The controller unfortunately reports SCSI pass-throughs
2076                  * to non-existent targets as invalid commands.
2077                  */
2078                 hpsa_print_cmd(h, "invalid command", cp);
2079                 dev_warn(d, "probably means device no longer present\n");
2080                 }
2081                 break;
2082         case CMD_PROTOCOL_ERR:
2083                 hpsa_print_cmd(h, "protocol error", cp);
2084                 break;
2085         case CMD_HARDWARE_ERR:
2086                 hpsa_print_cmd(h, "hardware error", cp);
2087                 break;
2088         case CMD_CONNECTION_LOST:
2089                 hpsa_print_cmd(h, "connection lost", cp);
2090                 break;
2091         case CMD_ABORTED:
2092                 hpsa_print_cmd(h, "aborted", cp);
2093                 break;
2094         case CMD_ABORT_FAILED:
2095                 hpsa_print_cmd(h, "abort failed", cp);
2096                 break;
2097         case CMD_UNSOLICITED_ABORT:
2098                 hpsa_print_cmd(h, "unsolicited abort", cp);
2099                 break;
2100         case CMD_TIMEOUT:
2101                 hpsa_print_cmd(h, "timed out", cp);
2102                 break;
2103         case CMD_UNABORTABLE:
2104                 hpsa_print_cmd(h, "unabortable", cp);
2105                 break;
2106         default:
2107                 hpsa_print_cmd(h, "unknown status", cp);
2108                 dev_warn(d, "Unknown command status %x\n",
2109                                 ei->CommandStatus);
2110         }
2111 }
2112
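/* Send a SCSI INQUIRY (or, with VPD_PAGE set in 'page', a VPD page
 * request) to the device at scsi3addr and copy the response into buf.
 */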
2113 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2114                         u16 page, unsigned char *buf,
2115                         unsigned char bufsize)
2116 {
2117         int rc = IO_OK;
2118         struct CommandList *c;
2119         struct ErrorInfo *ei;
2120
2121         c = cmd_special_alloc(h);
2122
2123         if (c == NULL) {                        /* trouble... */
2124                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2125                 return -ENOMEM;
2126         }
2127
2128         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2129                         page, scsi3addr, TYPE_CMD)) {
2130                 rc = -1;
2131                 goto out;
2132         }
2133         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2134         ei = c->err_info;
2135         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2136                 hpsa_scsi_interpret_error(h, c);
2137                 rc = -1;
2138         }
2139 out:
2140         cmd_special_free(h, c);
2141         return rc;
2142 }
2143
2144 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2145                 unsigned char *scsi3addr, unsigned char page,
2146                 struct bmic_controller_parameters *buf, size_t bufsize)
2147 {
2148         int rc = IO_OK;
2149         struct CommandList *c;
2150         struct ErrorInfo *ei;
2151
2152         c = cmd_special_alloc(h);
2153
2154         if (c == NULL) {                        /* trouble... */
2155                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2156                 return -ENOMEM;
2157         }
2158
2159         if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2160                         page, scsi3addr, TYPE_CMD)) {
2161                 rc = -1;
2162                 goto out;
2163         }
2164         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2165         ei = c->err_info;
2166         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2167                 hpsa_scsi_interpret_error(h, c);
2168                 rc = -1;
2169         }
2170 out:
2171         cmd_special_free(h, c);
2172         return rc;
2173 }
2174
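/* Send a reset message of the given type (fill_cmd defaults to LUN
 * reset) to the device at scsi3addr and wait for it to complete.
 */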
2175 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2176         u8 reset_type)
2177 {
2178         int rc = IO_OK;
2179         struct CommandList *c;
2180         struct ErrorInfo *ei;
2181
2182         c = cmd_special_alloc(h);
2183
2184         if (c == NULL) {                        /* trouble... */
2185                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2186                 return -ENOMEM;
2187         }
2188
2189         /* fill_cmd can't fail here, no data buffer to map. */
2190         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2191                         scsi3addr, TYPE_MSG);
2192         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2193         hpsa_scsi_do_simple_cmd_core(h, c);
2194         /* no unmap needed here because no data xfer. */
2195
2196         ei = c->err_info;
2197         if (ei->CommandStatus != 0) {
2198                 hpsa_scsi_interpret_error(h, c);
2199                 rc = -1;
2200         }
2201         cmd_special_free(h, c);
2202         return rc;
2203 }
2204
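/* Fetch the volume's RAID level from vendor-specific VPD page 0xC1,
 * falling back to RAID_UNKNOWN on any failure.
 */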
2205 static void hpsa_get_raid_level(struct ctlr_info *h,
2206         unsigned char *scsi3addr, unsigned char *raid_level)
2207 {
2208         int rc;
2209         unsigned char *buf;
2210
2211         *raid_level = RAID_UNKNOWN;
2212         buf = kzalloc(64, GFP_KERNEL);
2213         if (!buf)
2214                 return;
2215         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2216         if (rc == 0)
2217                 *raid_level = buf[8];
2218         if (*raid_level > RAID_UNKNOWN)
2219                 *raid_level = RAID_UNKNOWN;
2220         kfree(buf);
2221         return;
2222 }
2223
2224 #define HPSA_MAP_DEBUG
2225 #ifdef HPSA_MAP_DEBUG
2226 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2227                                 struct raid_map_data *map_buff)
2228 {
2229         struct raid_map_disk_data *dd = &map_buff->data[0];
2230         int map, row, col;
2231         u16 map_cnt, row_cnt, disks_per_row;
2232
2233         if (rc != 0)
2234                 return;
2235
2236         /* Show details only if debugging has been activated. */
2237         if (h->raid_offload_debug < 2)
2238                 return;
2239
2240         dev_info(&h->pdev->dev, "structure_size = %u\n",
2241                                 le32_to_cpu(map_buff->structure_size));
2242         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2243                         le32_to_cpu(map_buff->volume_blk_size));
2244         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2245                         le64_to_cpu(map_buff->volume_blk_cnt));
2246         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2247                         map_buff->phys_blk_shift);
2248         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2249                         map_buff->parity_rotation_shift);
2250         dev_info(&h->pdev->dev, "strip_size = %u\n",
2251                         le16_to_cpu(map_buff->strip_size));
2252         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2253                         le64_to_cpu(map_buff->disk_starting_blk));
2254         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2255                         le64_to_cpu(map_buff->disk_blk_cnt));
2256         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2257                         le16_to_cpu(map_buff->data_disks_per_row));
2258         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2259                         le16_to_cpu(map_buff->metadata_disks_per_row));
2260         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2261                         le16_to_cpu(map_buff->row_cnt));
2262         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2263                         le16_to_cpu(map_buff->layout_map_count));
2264         dev_info(&h->pdev->dev, "flags = %u\n",
2265                         le16_to_cpu(map_buff->flags));
2266         if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2267                 dev_info(&h->pdev->dev, "encryption = ON\n");
2268         else
2269                 dev_info(&h->pdev->dev, "encryption = OFF\n");
2270         dev_info(&h->pdev->dev, "dekindex = %u\n",
2271                         le16_to_cpu(map_buff->dekindex));
2272
2273         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2274         for (map = 0; map < map_cnt; map++) {
2275                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2276                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2277                 for (row = 0; row < row_cnt; row++) {
2278                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2279                         disks_per_row =
2280                                 le16_to_cpu(map_buff->data_disks_per_row);
2281                         for (col = 0; col < disks_per_row; col++, dd++)
2282                                 dev_info(&h->pdev->dev,
2283                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2284                                         col, dd->ioaccel_handle,
2285                                         dd->xor_mult[0], dd->xor_mult[1]);
2286                         disks_per_row =
2287                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2288                         for (col = 0; col < disks_per_row; col++, dd++)
2289                                 dev_info(&h->pdev->dev,
2290                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2291                                         col, dd->ioaccel_handle,
2292                                         dd->xor_mult[0], dd->xor_mult[1]);
2293                 }
2294         }
2295 }
2296 #else
2297 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2298                         __attribute__((unused)) int rc,
2299                         __attribute__((unused)) struct raid_map_data *map_buff)
2300 {
2301 }
2302 #endif
2303
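/* Read the controller's RAID map for a logical volume into
 * this_device->raid_map; a valid map is required before ioaccel
 * (HP SSD Smart Path) offload can be enabled for the volume.
 */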
2304 static int hpsa_get_raid_map(struct ctlr_info *h,
2305         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2306 {
2307         int rc = 0;
2308         struct CommandList *c;
2309         struct ErrorInfo *ei;
2310
2311         c = cmd_special_alloc(h);
2312         if (c == NULL) {
2313                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2314                 return -ENOMEM;
2315         }
2316         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2317                         sizeof(this_device->raid_map), 0,
2318                         scsi3addr, TYPE_CMD)) {
2319                 dev_warn(&h->pdev->dev, "fill_cmd failed in hpsa_get_raid_map()\n");
2320                 cmd_special_free(h, c);
2321                 return -ENOMEM;
2322         }
2323         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2324         ei = c->err_info;
2325         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2326                 hpsa_scsi_interpret_error(h, c);
2327                 cmd_special_free(h, c);
2328                 return -1;
2329         }
2330         cmd_special_free(h, c);
2331
2332         /* @todo in the future, dynamically allocate RAID map memory */
2333         if (le32_to_cpu(this_device->raid_map.structure_size) >
2334                                 sizeof(this_device->raid_map)) {
2335                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2336                 rc = -1;
2337         }
2338         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2339         return rc;
2340 }
2341
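/* Return 1 if the device's supported-VPD-pages page (0x00) lists the
 * given VPD page, 0 otherwise (or on any error).
 */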
2342 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2343         unsigned char scsi3addr[], u8 page)
2344 {
2345         int rc;
2346         int i;
2347         int pages;
2348         unsigned char *buf, bufsize;
2349
2350         buf = kzalloc(256, GFP_KERNEL);
2351         if (!buf)
2352                 return 0;
2353
2354         /* Get the size of the page list first */
2355         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2356                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2357                                 buf, HPSA_VPD_HEADER_SZ);
2358         if (rc != 0)
2359                 goto exit_unsupported;
2360         pages = buf[3];
2361         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2362                 bufsize = pages + HPSA_VPD_HEADER_SZ;
2363         else
2364                 bufsize = 255;
2365
2366         /* Get the whole VPD page list */
2367         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2368                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2369                                 buf, bufsize);
2370         if (rc != 0)
2371                 goto exit_unsupported;
2372
2373         pages = buf[3];
2374         for (i = 1; i <= pages; i++)
2375                 if (buf[3 + i] == page)
2376                         goto exit_supported;
2377 exit_unsupported:
2378         kfree(buf);
2379         return 0;
2380 exit_supported:
2381         kfree(buf);
2382         return 1;
2383 }
2384
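/* Query the ioaccel-status VPD page to learn whether HP SSD Smart Path
 * offload is configured and enabled for this volume, and load the RAID
 * map if it is.
 */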
2385 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2386         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2387 {
2388         int rc;
2389         unsigned char *buf;
2390         u8 ioaccel_status;
2391
2392         this_device->offload_config = 0;
2393         this_device->offload_enabled = 0;
2394
2395         buf = kzalloc(64, GFP_KERNEL);
2396         if (!buf)
2397                 return;
2398         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2399                 goto out;
2400         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2401                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2402         if (rc != 0)
2403                 goto out;
2404
2405 #define IOACCEL_STATUS_BYTE 4
2406 #define OFFLOAD_CONFIGURED_BIT 0x01
2407 #define OFFLOAD_ENABLED_BIT 0x02
2408         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2409         this_device->offload_config =
2410                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2411         if (this_device->offload_config) {
2412                 this_device->offload_enabled =
2413                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2414                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2415                         this_device->offload_enabled = 0;
2416         }
2417 out:
2418         kfree(buf);
2419         return;
2420 }
2421
2422 /* Get the device id from inquiry page 0x83 */
2423 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2424         unsigned char *device_id, int buflen)
2425 {
2426         int rc;
2427         unsigned char *buf;
2428
2429         if (buflen > 16)
2430                 buflen = 16;
2431         buf = kzalloc(64, GFP_KERNEL);
2432         if (!buf)
2433                 return -ENOMEM;
2434         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2435         if (rc == 0)
2436                 memcpy(device_id, &buf[8], buflen);
2437         kfree(buf);
2438         return rc != 0;
2439 }
2440
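/* Issue a REPORT PHYSICAL/LOGICAL LUNS command to the controller
 * itself (scsi3addr of all zeroes) and sanity-check that the response
 * uses the requested format.
 */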
2441 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2442                 struct ReportLUNdata *buf, int bufsize,
2443                 int extended_response)
2444 {
2445         int rc = IO_OK;
2446         struct CommandList *c;
2447         unsigned char scsi3addr[8];
2448         struct ErrorInfo *ei;
2449
2450         c = cmd_special_alloc(h);
2451         if (c == NULL) {                        /* trouble... */
2452                 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2453                 return -1;
2454         }
2455         /* address the controller */
2456         memset(scsi3addr, 0, sizeof(scsi3addr));
2457         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2458                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2459                 rc = -1;
2460                 goto out;
2461         }
2462         if (extended_response)
2463                 c->Request.CDB[1] = extended_response;
2464         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2465         ei = c->err_info;
2466         if (ei->CommandStatus != 0 &&
2467             ei->CommandStatus != CMD_DATA_UNDERRUN) {
2468                 hpsa_scsi_interpret_error(h, c);
2469                 rc = -1;
2470         } else {
2471                 if (buf->extended_response_flag != extended_response) {
2472                         dev_err(&h->pdev->dev,
2473                                 "report luns requested format %u, got %u\n",
2474                                 extended_response,
2475                                 buf->extended_response_flag);
2476                         rc = -1;
2477                 }
2478         }
2479 out:
2480         cmd_special_free(h, c);
2481         return rc;
2482 }
2483
2484 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2485                 struct ReportLUNdata *buf,
2486                 int bufsize, int extended_response)
2487 {
2488         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2489 }
2490
2491 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2492                 struct ReportLUNdata *buf, int bufsize)
2493 {
2494         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2495 }
2496
2497 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2498         int bus, int target, int lun)
2499 {
2500         device->bus = bus;
2501         device->target = target;
2502         device->lun = lun;
2503 }
2504
2505 /* Use VPD inquiry to get details of volume status */
2506 static int hpsa_get_volume_status(struct ctlr_info *h,
2507                                         unsigned char scsi3addr[])
2508 {
2509         int rc;
2510         int status;
2511         int size;
2512         unsigned char *buf;
2513
2514         buf = kzalloc(64, GFP_KERNEL);
2515         if (!buf)
2516                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2517
2518         /* Does controller have VPD for logical volume status? */
2519         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2520                 goto exit_failed;
2521
2522         /* Get the size of the VPD return buffer */
2523         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2524                                         buf, HPSA_VPD_HEADER_SZ);
2525         if (rc != 0)
2526                 goto exit_failed;
2527         size = buf[3];
2528
2529         /* Now get the whole VPD buffer */
2530         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2531                                         buf, size + HPSA_VPD_HEADER_SZ);
2532         if (rc != 0)
2533                 goto exit_failed;
2534         status = buf[4]; /* status byte */
2535
2536         kfree(buf);
2537         return status;
2538 exit_failed:
2539         kfree(buf);
2540         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2541 }
2542
2543 /* Determine offline status of a volume.
2544  * Return either:
2545  *  0 (not offline)
2546  *  0xff (offline for unknown reasons)
2547  *  # (integer code indicating one of several NOT READY states
2548  *     describing why a volume is to be kept offline)
2549  */
2550 static int hpsa_volume_offline(struct ctlr_info *h,
2551                                         unsigned char scsi3addr[])
2552 {
2553         struct CommandList *c;
2554         unsigned char *sense, sense_key, asc, ascq;
2555         int ldstat = 0;
2556         u16 cmd_status;
2557         u8 scsi_status;
2558 #define ASC_LUN_NOT_READY 0x04
2559 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2560 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2561
2562         c = cmd_alloc(h);
2563         if (!c)
2564                 return 0;
2565         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2566         hpsa_scsi_do_simple_cmd_core(h, c);
2567         sense = c->err_info->SenseInfo;
2568         sense_key = sense[2];
2569         asc = sense[12];
2570         ascq = sense[13];
2571         cmd_status = c->err_info->CommandStatus;
2572         scsi_status = c->err_info->ScsiStatus;
2573         cmd_free(h, c);
2574         /* Is the volume 'not ready'? */
2575         if (cmd_status != CMD_TARGET_STATUS ||
2576                 scsi_status != SAM_STAT_CHECK_CONDITION ||
2577                 sense_key != NOT_READY ||
2578                 asc != ASC_LUN_NOT_READY)  {
2579                 return 0;
2580         }
2581
2582         /* Determine the reason for not ready state */
2583         ldstat = hpsa_get_volume_status(h, scsi3addr);
2584
2585         /* Keep volume offline in certain cases: */
2586         switch (ldstat) {
2587         case HPSA_LV_UNDERGOING_ERASE:
2588         case HPSA_LV_UNDERGOING_RPI:
2589         case HPSA_LV_PENDING_RPI:
2590         case HPSA_LV_ENCRYPTED_NO_KEY:
2591         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2592         case HPSA_LV_UNDERGOING_ENCRYPTION:
2593         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2594         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2595                 return ldstat;
2596         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2597                 /* If VPD status page isn't available,
2598                  * use ASC/ASCQ to determine state
2599                  */
2600                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2601                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2602                         return ldstat;
2603                 break;
2604         default:
2605                 break;
2606         }
2607         return 0;
2608 }
2609
2610 static int hpsa_update_device_info(struct ctlr_info *h,
2611         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2612         unsigned char *is_OBDR_device)
2613 {
2614
2615 #define OBDR_SIG_OFFSET 43
2616 #define OBDR_TAPE_SIG "$DR-10"
2617 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2618 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2619
2620         unsigned char *inq_buff;
2621         unsigned char *obdr_sig;
2622
2623         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2624         if (!inq_buff)
2625                 goto bail_out;
2626
2627         /* Do an inquiry to the device to see what it is. */
2628         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2629                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2630                 /* Inquiry failed (msg printed already) */
2631                 dev_err(&h->pdev->dev,
2632                         "hpsa_update_device_info: inquiry failed\n");
2633                 goto bail_out;
2634         }
2635
2636         this_device->devtype = (inq_buff[0] & 0x1f);
2637         memcpy(this_device->scsi3addr, scsi3addr, 8);
2638         memcpy(this_device->vendor, &inq_buff[8],
2639                 sizeof(this_device->vendor));
2640         memcpy(this_device->model, &inq_buff[16],
2641                 sizeof(this_device->model));
2642         memset(this_device->device_id, 0,
2643                 sizeof(this_device->device_id));
2644         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2645                 sizeof(this_device->device_id));
2646
2647         if (this_device->devtype == TYPE_DISK &&
2648                 is_logical_dev_addr_mode(scsi3addr)) {
2649                 int volume_offline;
2650
2651                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2652                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2653                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2654                 volume_offline = hpsa_volume_offline(h, scsi3addr);
2655                 if (volume_offline < 0 || volume_offline > 0xff)
2656                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2657                 this_device->volume_offline = volume_offline & 0xff;
2658         } else {
2659                 this_device->raid_level = RAID_UNKNOWN;
2660                 this_device->offload_config = 0;
2661                 this_device->offload_enabled = 0;
2662                 this_device->volume_offline = 0;
2663         }
2664
2665         if (is_OBDR_device) {
2666                 /* See if this is a One-Button-Disaster-Recovery device
2667                  * by looking for "$DR-10" at offset 43 in inquiry data.
2668                  */
2669                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2670                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2671                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
2672                                                 OBDR_SIG_LEN) == 0);
2673         }
2674
2675         kfree(inq_buff);
2676         return 0;
2677
2678 bail_out:
2679         kfree(inq_buff);
2680         return 1;
2681 }
2682
2683 static unsigned char *ext_target_model[] = {
2684         "MSA2012",
2685         "MSA2024",
2686         "MSA2312",
2687         "MSA2324",
2688         "P2000 G3 SAS",
2689         "MSA 2040 SAS",
2690         NULL,
2691 };
2692
2693 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2694 {
2695         int i;
2696
2697         for (i = 0; ext_target_model[i]; i++)
2698                 if (strncmp(device->model, ext_target_model[i],
2699                         strlen(ext_target_model[i])) == 0)
2700                         return 1;
2701         return 0;
2702 }
2703
2704 /* Helper function to assign bus, target, lun mapping of devices.
2705  * Puts non-external target logical volumes on bus 0, external target logical
2706  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2707  * Logical drive target and lun are assigned at this time, but
2708  * physical device lun and target assignment are deferred (assigned
2709  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2710  */
2711 static void figure_bus_target_lun(struct ctlr_info *h,
2712         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2713 {
2714         u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2715
2716         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2717                 /* physical device, target and lun filled in later */
2718                 if (is_hba_lunid(lunaddrbytes))
2719                         hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2720                 else
2721                         /* defer target, lun assignment for physical devices */
2722                         hpsa_set_bus_target_lun(device, 2, -1, -1);
2723                 return;
2724         }
2725         /* It's a logical device */
2726         if (is_ext_target(h, device)) {
2727                 /* External target: put logicals on bus 1
2728                  * and match the target/lun numbers the box
2729                  * reports; other smart arrays: bus 0, target 0, lun from lunid.
2730                  */
2731                 hpsa_set_bus_target_lun(device,
2732                         1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2733                 return;
2734         }
2735         hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
2736 }
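
/*
 * A worked example of the mapping above (lunid value invented for
 * illustration): a logical volume on an external MSA-class box that
 * reports lunid 0x00020003 is placed at bus 1,
 * target (0x00020003 >> 16) & 0x3fff = 2, lun 0x00020003 & 0x00ff = 3.
 * The same lunid on a non-external Smart Array volume would land on
 * bus 0, target 0, lun 0x00020003 & 0x3fff = 3.
 */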
2737
2738 /*
2739  * If there is no lun 0 on a target, Linux won't find any devices.
2740  * For the external targets (arrays), we have to manually detect the enclosure
2741  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2742  * it for some reason.  *tmpdevice is the target we're adding,
2743  * this_device is a pointer into the current element of currentsd[]
2744  * that we're building up in update_scsi_devices(), below.
2745  * lunzerobits is a bitmap that tracks which targets already have a
2746  * lun 0 assigned.
2747  * Returns 1 if an enclosure was added, 0 if not.
2748  */
2749 static int add_ext_target_dev(struct ctlr_info *h,
2750         struct hpsa_scsi_dev_t *tmpdevice,
2751         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2752         unsigned long lunzerobits[], int *n_ext_target_devs)
2753 {
2754         unsigned char scsi3addr[8];
2755
2756         if (test_bit(tmpdevice->target, lunzerobits))
2757                 return 0; /* There is already a lun 0 on this target. */
2758
2759         if (!is_logical_dev_addr_mode(lunaddrbytes))
2760                 return 0; /* It's the logical targets that may lack lun 0. */
2761
2762         if (!is_ext_target(h, tmpdevice))
2763                 return 0; /* Only external target devices have this problem. */
2764
2765         if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2766                 return 0;
2767
2768         memset(scsi3addr, 0, 8);
2769         scsi3addr[3] = tmpdevice->target;
2770         if (is_hba_lunid(scsi3addr))
2771                 return 0; /* Don't add the RAID controller here. */
2772
2773         if (is_scsi_rev_5(h))
2774                 return 0; /* p1210m doesn't need to do this. */
2775
2776         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2777                 dev_warn(&h->pdev->dev, "Maximum number of external "
2778                         "target devices exceeded.  Check your hardware "
2779                         "configuration.\n");
2780                 return 0;
2781         }
2782
2783         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2784                 return 0;
2785         (*n_ext_target_devs)++;
2786         hpsa_set_bus_target_lun(this_device,
2787                                 tmpdevice->bus, tmpdevice->target, 0);
2788         set_bit(tmpdevice->target, lunzerobits);
2789         return 1;
2790 }
2791
2792 /*
2793  * Get address of physical disk used for an ioaccel2 mode command:
2794  *      1. Extract ioaccel2 handle from the command.
2795  *      2. Find a matching ioaccel2 handle from list of physical disks.
2796  *      3. Return:
2797  *              1 and set scsi3addr to address of matching physical disk, or
2798  *              0 if no matching physical disk was found.
2799  */
2800 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2801         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2802 {
2803         struct ReportExtendedLUNdata *physicals = NULL;
2804         int responsesize = 24;  /* size of physical extended response */
2805         int extended = 2;       /* flag forces reporting 'other dev info'. */
2806         int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2807         u32 nphysicals = 0;     /* number of reported physical devs */
2808         int found = 0;          /* found match (1) or not (0) */
2809         u32 find;               /* handle we need to match */
2810         int i;
2811         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2812         struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2813         struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2815         u32 scsi_nexus;         /* 4 byte device handle for the ioaccel2 cmd */
2816
2817         if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2818                 return 0; /* no match */
2819
2820         /* point to the ioaccel2 device handle */
2821         c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2822         if (c2a == NULL)
2823                 return 0; /* no match */
2824
2825         scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2826         if (scmd == NULL)
2827                 return 0; /* no match */
2828
2829         d = scmd->device->hostdata;
2830         if (d == NULL)
2831                 return 0; /* no match */
2832
        /* the handle in the ioaccel2 command is in controller (LE) order */
2834         scsi_nexus = le32_to_cpu(c2a->scsi_nexus);
2835         find = c2a->scsi_nexus;
2836
2837         if (h->raid_offload_debug > 0)
2838                 dev_info(&h->pdev->dev,
2839                         "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2840                         __func__, scsi_nexus,
2841                         d->device_id[0], d->device_id[1], d->device_id[2],
2842                         d->device_id[3], d->device_id[4], d->device_id[5],
2843                         d->device_id[6], d->device_id[7], d->device_id[8],
2844                         d->device_id[9], d->device_id[10], d->device_id[11],
2845                         d->device_id[12], d->device_id[13], d->device_id[14],
2846                         d->device_id[15]);
2847
2848         /* Get the list of physical devices */
2849         physicals = kzalloc(reportsize, GFP_KERNEL);
2850         if (physicals == NULL)
2851                 return 0;
2852         if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2853                 reportsize, extended)) {
2854                 dev_err(&h->pdev->dev,
2855                         "Can't lookup %s device handle: report physical LUNs failed.\n",
2856                         "HP SSD Smart Path");
2857                 kfree(physicals);
2858                 return 0;
2859         }
2860         nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2861                                                         responsesize;
2862
2863         /* find ioaccel2 handle in list of physicals: */
2864         for (i = 0; i < nphysicals; i++) {
2865                 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2866
2867                 /* handle is in bytes 28-31 of each lun */
2868                 if (entry->ioaccel_handle != find)
2869                         continue; /* didn't match */
2870                 found = 1;
2871                 memcpy(scsi3addr, entry->lunid, 8);
2872                 if (h->raid_offload_debug > 0)
2873                         dev_info(&h->pdev->dev,
2874                                 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2875                                 __func__, find,
2876                                 entry->ioaccel_handle, scsi3addr);
2877                 break; /* found it */
2878         }
2879
2880         kfree(physicals);
2881         return found;
2886 }

2887 /*
2888  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
2889  * logdev.  The number of luns in physdev and logdev are returned in
2890  * *nphysicals and *nlogicals, respectively.
2891  * Returns 0 on success, -1 otherwise.
2892  */
2893 static int hpsa_gather_lun_info(struct ctlr_info *h,
2894         int reportlunsize,
2895         struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
2896         struct ReportLUNdata *logdev, u32 *nlogicals)
2897 {
2898         int physical_entry_size = 8;
2899
2900         *physical_mode = 0;
2901
2902         /* For I/O accelerator mode we need to read physical device handles */
2903         if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2904                 h->transMethod & CFGTBL_Trans_io_accel2) {
2905                 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2906                 physical_entry_size = 24;
2907         }
2908         if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
2909                                                         *physical_mode)) {
2910                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2911                 return -1;
2912         }
2913         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2914                                                         physical_entry_size;
2915         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2916                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2917                         "  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2918                         *nphysicals - HPSA_MAX_PHYS_LUN);
2919                 *nphysicals = HPSA_MAX_PHYS_LUN;
2920         }
2921         if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
2922                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2923                 return -1;
2924         }
2925         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2926         /* Reject Logicals in excess of our max capability. */
2927         if (*nlogicals > HPSA_MAX_LUN) {
2928                 dev_warn(&h->pdev->dev,
2929                         "maximum logical LUNs (%d) exceeded.  "
2930                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
2931                         *nlogicals - HPSA_MAX_LUN);
2932                 *nlogicals = HPSA_MAX_LUN;
2933         }
2934         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2935                 dev_warn(&h->pdev->dev,
2936                         "maximum logical + physical LUNs (%d) exceeded. "
2937                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2938                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2939                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2940         }
2941         return 0;
2942 }
2943
2944 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
2945         int nphysicals, int nlogicals,
2946         struct ReportExtendedLUNdata *physdev_list,
2947         struct ReportLUNdata *logdev_list)
2948 {
2949         /* Helper function, figure out where the LUN ID info is coming from
2950          * given index i, lists of physical and logical devices, where in
2951          * the list the raid controller is supposed to appear (first or last)
2952          */
2953
2954         int logicals_start = nphysicals + (raid_ctlr_position == 0);
2955         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2956
2957         if (i == raid_ctlr_position)
2958                 return RAID_CTLR_LUNID;
2959
2960         if (i < logicals_start)
2961                 return &physdev_list->LUN[i -
2962                                 (raid_ctlr_position == 0)].lunid[0];
2963
2964         if (i < last_device)
2965                 return &logdev_list->LUN[i - nphysicals -
2966                         (raid_ctlr_position == 0)][0];
2967         BUG();
2968         return NULL;
2969 }
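
/*
 * Worked example of the indexing above (counts invented for
 * illustration): with nphysicals = 2, nlogicals = 3 and the controller
 * reported last (raid_ctlr_position = 5), indices 0-1 map to
 * physdev_list->LUN[0-1], indices 2-4 map to logdev_list->LUN[0-2],
 * and index 5 returns RAID_CTLR_LUNID.  On SCSI rev 5 controllers the
 * controller is reported first (raid_ctlr_position = 0), and the
 * (raid_ctlr_position == 0) terms shift every other index up by one.
 */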
2970
2971 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2972 {
2973         int rc;
2974         int hba_mode_enabled;
2975         struct bmic_controller_parameters *ctlr_params;

2976         ctlr_params = kzalloc(sizeof(*ctlr_params), GFP_KERNEL);
2979         if (!ctlr_params)
2980                 return -ENOMEM;
2981         rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2982                 sizeof(struct bmic_controller_parameters));
2983         if (rc) {
2984                 kfree(ctlr_params);
2985                 return rc;
2986         }
2987
2988         hba_mode_enabled =
2989                 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2990         kfree(ctlr_params);
2991         return hba_mode_enabled;
2992 }
2993
2994 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2995 {
2996         /* The idea here is that we could get notified
2997          * that some devices have changed, so we do a report
2998          * physical luns and report logical luns cmd, and adjust
2999          * our list of devices accordingly.
3000          *
3001          * The scsi3addr's of devices won't change so long as the
3002          * adapter is not reset.  That means we can rescan and
3003          * tell which devices we already know about, vs. new
3004          * devices, vs.  disappearing devices.
3005          */
3006         struct ReportExtendedLUNdata *physdev_list = NULL;
3007         struct ReportLUNdata *logdev_list = NULL;
3008         u32 nphysicals = 0;
3009         u32 nlogicals = 0;
3010         int physical_mode = 0;
3011         u32 ndev_allocated = 0;
3012         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3013         int ncurrent = 0;
3014         int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
3015         int i, n_ext_target_devs, ndevs_to_allocate;
3016         int raid_ctlr_position;
3017         int rescan_hba_mode;
3018         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3019
3020         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3021         physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3022         logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3023         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3024
3025         if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
3026                 dev_err(&h->pdev->dev, "out of memory\n");
3027                 goto out;
3028         }
3029         memset(lunzerobits, 0, sizeof(lunzerobits));
3030
3031         rescan_hba_mode = hpsa_hba_mode_enabled(h);
3032         if (rescan_hba_mode < 0)
3033                 goto out;
3034
3035         if (!h->hba_mode_enabled && rescan_hba_mode)
3036                 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3037         else if (h->hba_mode_enabled && !rescan_hba_mode)
3038                 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3039
3040         h->hba_mode_enabled = rescan_hba_mode;
3041
3042         if (hpsa_gather_lun_info(h, reportlunsize,
3043                         (struct ReportLUNdata *) physdev_list, &nphysicals,
3044                         &physical_mode, logdev_list, &nlogicals))
3045                 goto out;
3046
3047         /* We might see up to the maximum number of logical and physical disks
3048          * plus external target devices, and a device for the local RAID
3049          * controller.
3050          */
3051         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3052
3053         /* Allocate the per device structures */
3054         for (i = 0; i < ndevs_to_allocate; i++) {
3055                 if (i >= HPSA_MAX_DEVICES) {
3056                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3057                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
3058                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
3059                         break;
3060                 }
3061
3062                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3063                 if (!currentsd[i]) {
3064                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3065                                 __FILE__, __LINE__);
3066                         goto out;
3067                 }
3068                 ndev_allocated++;
3069         }
3070
3071         if (is_scsi_rev_5(h))
3072                 raid_ctlr_position = 0;
3073         else
3074                 raid_ctlr_position = nphysicals + nlogicals;
3075
3076         /* adjust our table of devices */
3077         n_ext_target_devs = 0;
3078         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3079                 u8 *lunaddrbytes, is_OBDR = 0;
3080
3081                 /* Figure out where the LUN ID info is coming from */
3082                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3083                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3084                 /* skip masked physical devices. */
3085                 if ((lunaddrbytes[3] & 0xC0) &&
3086                         i < nphysicals + (raid_ctlr_position == 0))
3087                         continue;
3088
3089                 /* Get device type, vendor, model, device id */
3090                 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3091                                                         &is_OBDR))
3092                         continue; /* skip it if we can't talk to it. */
3093                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3094                 this_device = currentsd[ncurrent];
3095
3096                 /*
3097                  * For external target devices, we have to insert a LUN 0 which
3098                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3099                  * is nonetheless an enclosure device there.  We have to
3100                  * present it; otherwise Linux won't find anything if
3101                  * there is no lun 0.
3102                  */
3103                 if (add_ext_target_dev(h, tmpdevice, this_device,
3104                                 lunaddrbytes, lunzerobits,
3105                                 &n_ext_target_devs)) {
3106                         ncurrent++;
3107                         this_device = currentsd[ncurrent];
3108                 }
3109
3110                 *this_device = *tmpdevice;
3111
3112                 switch (this_device->devtype) {
3113                 case TYPE_ROM:
3114                         /* We don't *really* support actual CD-ROM devices,
3115                          * just "One Button Disaster Recovery" tape drive
3116                          * which temporarily pretends to be a CD-ROM drive.
3117                          * So we check that the device is really an OBDR tape
3118                          * device by checking for "$DR-10" in bytes 43-48 of
3119                          * the inquiry data.
3120                          */
3121                         if (is_OBDR)
3122                                 ncurrent++;
3123                         break;
3124                 case TYPE_DISK:
3125                         if (h->hba_mode_enabled) {
3126                                 /* never use raid mapper in HBA mode */
3127                                 this_device->offload_enabled = 0;
3128                                 ncurrent++;
3129                                 break;
3130                         } else if (h->acciopath_status) {
3131                                 if (i >= nphysicals) {
3132                                         ncurrent++;
3133                                         break;
3134                                 }
3135                         } else {
3136                                 if (i < nphysicals)
3137                                         break;
3138                                 ncurrent++;
3139                                 break;
3140                         }
3141                         if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3142                                 memcpy(&this_device->ioaccel_handle,
3143                                         &lunaddrbytes[20],
3144                                         sizeof(this_device->ioaccel_handle));
3145                                 ncurrent++;
3146                         }
3147                         break;
3148                 case TYPE_TAPE:
3149                 case TYPE_MEDIUM_CHANGER:
3150                         ncurrent++;
3151                         break;
3152                 case TYPE_RAID:
3153                         /* Only present the Smart Array HBA as a RAID controller.
3154                          * If it's a RAID controller other than the HBA itself
3155                          * (an external RAID controller, MSA500 or similar)
3156                          * don't present it.
3157                          */
3158                         if (!is_hba_lunid(lunaddrbytes))
3159                                 break;
3160                         ncurrent++;
3161                         break;
3162                 default:
3163                         break;
3164                 }
3165                 if (ncurrent >= HPSA_MAX_DEVICES)
3166                         break;
3167         }
3168         adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3169 out:
3170         kfree(tmpdevice);
3171         for (i = 0; i < ndev_allocated; i++)
3172                 kfree(currentsd[i]);
3173         kfree(currentsd);
3174         kfree(physdev_list);
3175         kfree(logdev_list);
3176 }
3177
3178 /* hpsa_scatter_gather takes a struct scsi_cmnd (cmd) and does the PCI
3179  * DMA mapping and fills in the scatter gather entries of the
3180  * hpsa command, cp.
3181  */
3182 static int hpsa_scatter_gather(struct ctlr_info *h,
3183                 struct CommandList *cp,
3184                 struct scsi_cmnd *cmd)
3185 {
3186         unsigned int len;
3187         struct scatterlist *sg;
3188         u64 addr64;
3189         int use_sg, i, sg_index, chained;
3190         struct SGDescriptor *curr_sg;
3191
3192         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3193
3194         use_sg = scsi_dma_map(cmd);
3195         if (use_sg < 0)
3196                 return use_sg;
3197
3198         if (!use_sg)
3199                 goto sglist_finished;
3200
3201         curr_sg = cp->SG;
3202         chained = 0;
3203         sg_index = 0;
3204         scsi_for_each_sg(cmd, sg, use_sg, i) {
3205                 if (i == h->max_cmd_sg_entries - 1 &&
3206                         use_sg > h->max_cmd_sg_entries) {
3207                         chained = 1;
3208                         curr_sg = h->cmd_sg_list[cp->cmdindex];
3209                         sg_index = 0;
3210                 }
3211                 addr64 = (u64) sg_dma_address(sg);
3212                 len  = sg_dma_len(sg);
3213                 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3214                 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3215                 curr_sg->Len = len;
3216                 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
3217                 curr_sg++;
3218         }
3219
3220         if (use_sg + chained > h->maxSG)
3221                 h->maxSG = use_sg + chained;
3222
3223         if (chained) {
3224                 cp->Header.SGList = h->max_cmd_sg_entries;
3225                 cp->Header.SGTotal = (u16) (use_sg + 1);
3226                 if (hpsa_map_sg_chain_block(h, cp)) {
3227                         scsi_dma_unmap(cmd);
3228                         return -1;
3229                 }
3230                 return 0;
3231         }
3232
3233 sglist_finished:
3234
3235         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
3236         cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
3237         return 0;
3238 }
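
/*
 * Chaining in the function above, by example (sizes invented for
 * illustration): if h->max_cmd_sg_entries is 32 and scsi_dma_map()
 * returns 40 entries, entries 0-30 are placed in cp->SG[], then
 * curr_sg switches to h->cmd_sg_list[cp->cmdindex] for the remaining
 * 9 entries.  hpsa_map_sg_chain_block() converts the last on-command
 * descriptor into a pointer to that chain block, and SGTotal is set
 * to use_sg + 1 to account for the chain descriptor itself.
 */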
3239
3240 #define IO_ACCEL_INELIGIBLE (1)
3241 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3242 {
3243         int is_write = 0;
3244         u32 block;
3245         u32 block_cnt;
3246
3247         /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3248         switch (cdb[0]) {
3249         case WRITE_6:
3250         case WRITE_12:
3251                 is_write = 1;
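                /* fall through */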
3252         case READ_6:
3253         case READ_12:
3254                 if (*cdb_len == 6) {
                        /* 6-byte LBA is 21 bits; transfer length 0 means 256 */
3255                         block = (((u32) (cdb[1] & 0x1f)) << 16) |
                                (((u32) cdb[2]) << 8) | cdb[3];
3256                         block_cnt = cdb[4];
                        if (block_cnt == 0)
                                block_cnt = 256;
3257                 } else {
3258                         BUG_ON(*cdb_len != 12);
3259                         block = (((u32) cdb[2]) << 24) |
3260                                 (((u32) cdb[3]) << 16) |
3261                                 (((u32) cdb[4]) << 8) |
3262                                 cdb[5];
3263                         block_cnt =
3264                                 (((u32) cdb[6]) << 24) |
3265                                 (((u32) cdb[7]) << 16) |
3266                                 (((u32) cdb[8]) << 8) |
3267                                 cdb[9];
3268                 }
3269                 if (block_cnt > 0xffff)
3270                         return IO_ACCEL_INELIGIBLE;
3271
3272                 cdb[0] = is_write ? WRITE_10 : READ_10;
3273                 cdb[1] = 0;
3274                 cdb[2] = (u8) (block >> 24);
3275                 cdb[3] = (u8) (block >> 16);
3276                 cdb[4] = (u8) (block >> 8);
3277                 cdb[5] = (u8) (block);
3278                 cdb[6] = 0;
3279                 cdb[7] = (u8) (block_cnt >> 8);
3280                 cdb[8] = (u8) (block_cnt);
3281                 cdb[9] = 0;
3282                 *cdb_len = 10;
3283                 break;
3284         }
3285         return 0;
3286 }
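
/*
 * Example of the fixup above (CDB bytes invented for illustration):
 * a READ(6) CDB of 08 00 12 34 08 00 (LBA 0x1234, 8 blocks) is
 * rewritten in place as the READ(10) CDB
 * 28 00 00 00 12 34 00 00 08 00 before going down the accelerator
 * path, and *cdb_len becomes 10.
 */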
3287
3288 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3289         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3290         u8 *scsi3addr)
3291 {
3292         struct scsi_cmnd *cmd = c->scsi_cmd;
3293         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3294         unsigned int len;
3295         unsigned int total_len = 0;
3296         struct scatterlist *sg;
3297         u64 addr64;
3298         int use_sg, i;
3299         struct SGDescriptor *curr_sg;
3300         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3301
3302         /* TODO: implement chaining support */
3303         if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3304                 return IO_ACCEL_INELIGIBLE;
3305
3306         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3307
3308         if (fixup_ioaccel_cdb(cdb, &cdb_len))
3309                 return IO_ACCEL_INELIGIBLE;
3310
3311         c->cmd_type = CMD_IOACCEL1;
3312
3313         /* Adjust the DMA address to point to the accelerated command buffer */
3314         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3315                                 (c->cmdindex * sizeof(*cp));
3316         BUG_ON(c->busaddr & 0x0000007F);
3317
3318         use_sg = scsi_dma_map(cmd);
3319         if (use_sg < 0)
3320                 return use_sg;
3321
3322         if (use_sg) {
3323                 curr_sg = cp->SG;
3324                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3325                         addr64 = (u64) sg_dma_address(sg);
3326                         len  = sg_dma_len(sg);
3327                         total_len += len;
3328                         curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3329                         curr_sg->Addr.upper =
3330                                 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3331                         curr_sg->Len = len;
3332
3333                         if (i == (scsi_sg_count(cmd) - 1))
3334                                 curr_sg->Ext = HPSA_SG_LAST;
3335                         else
3336                                 curr_sg->Ext = 0;  /* we are not chaining */
3337                         curr_sg++;
3338                 }
3339
3340                 switch (cmd->sc_data_direction) {
3341                 case DMA_TO_DEVICE:
3342                         control |= IOACCEL1_CONTROL_DATA_OUT;
3343                         break;
3344                 case DMA_FROM_DEVICE:
3345                         control |= IOACCEL1_CONTROL_DATA_IN;
3346                         break;
3347                 case DMA_NONE:
3348                         control |= IOACCEL1_CONTROL_NODATAXFER;
3349                         break;
3350                 default:
3351                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3352                                 cmd->sc_data_direction);
3353                         BUG();
3354                         break;
3355                 }
3356         } else {
3357                 control |= IOACCEL1_CONTROL_NODATAXFER;
3358         }
3359
3360         c->Header.SGList = use_sg;
3361         /* Fill out the command structure to submit */
3362         cp->dev_handle = ioaccel_handle & 0xFFFF;
3363         cp->transfer_len = total_len;
3364         cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
3365                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
3366         cp->control = control;
3367         memcpy(cp->CDB, cdb, cdb_len);
3368         memcpy(cp->CISS_LUN, scsi3addr, 8);
3369         /* Tag was already set at init time. */
3370         enqueue_cmd_and_start_io(h, c);
3371         return 0;
3372 }
3373
3374 /*
3375  * Queue a command directly to a device behind the controller using the
3376  * I/O accelerator path.
3377  */
3378 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3379         struct CommandList *c)
3380 {
3381         struct scsi_cmnd *cmd = c->scsi_cmd;
3382         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3383
3384         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3385                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3386 }
3387
3388 /*
3389  * Set encryption parameters for the ioaccel2 request
3390  */
3391 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3392         struct CommandList *c, struct io_accel2_cmd *cp)
3393 {
3394         struct scsi_cmnd *cmd = c->scsi_cmd;
3395         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3396         struct raid_map_data *map = &dev->raid_map;
3397         u64 first_block;
3398
3399         BUG_ON(!(dev->offload_config && dev->offload_enabled));
3400
3401         /* Are we doing encryption on this device */
3402         if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3403                 return;
3404         /* Set the data encryption key index. */
3405         cp->dekindex = map->dekindex;
3406
3407         /* Set the encryption enable flag, encoded into direction field. */
3408         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3409
3410         /* Set encryption tweak values based on logical block address
3411          * If block size is 512, tweak value is LBA.
3412          * For other block sizes, tweak is (LBA * block size) / 512.
3413          */
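        /*
         * Worked example (numbers invented for illustration): on a
         * volume with 4096-byte blocks, an I/O starting at LBA 100
         * gets tweak (100 * 4096) / 512 = 800; with 512-byte blocks
         * the tweak is simply the starting LBA.
         */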
3414         switch (cmd->cmnd[0]) {
3415         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3416         case WRITE_6:
3417         case READ_6:
3418                 if (map->volume_blk_size == 512) {
                        /* 6-byte LBA is 21 bits: cmnd[1] bits 4:0 are MSBs */
3419                         cp->tweak_lower =
                                (((u32) (cmd->cmnd[1] & 0x1f)) << 16) |
3420                                 (((u32) cmd->cmnd[2]) << 8) |
3421                                         cmd->cmnd[3];
3422                         cp->tweak_upper = 0;
3423                 } else {
3424                         first_block =
                                (((u64) (cmd->cmnd[1] & 0x1f)) << 16) |
3425                                 (((u64) cmd->cmnd[2]) << 8) |
3426                                         cmd->cmnd[3];
3427                         first_block = (first_block * map->volume_blk_size)/512;
3428                         cp->tweak_lower = (u32)first_block;
3429                         cp->tweak_upper = (u32)(first_block >> 32);
3430                 }
3431                 break;
3432         case WRITE_10:
3433         case READ_10:
3434                 if (map->volume_blk_size == 512) {
3435                         cp->tweak_lower =
3436                                 (((u32) cmd->cmnd[2]) << 24) |
3437                                 (((u32) cmd->cmnd[3]) << 16) |
3438                                 (((u32) cmd->cmnd[4]) << 8) |
3439                                         cmd->cmnd[5];
3440                         cp->tweak_upper = 0;
3441                 } else {
3442                         first_block =
3443                                 (((u64) cmd->cmnd[2]) << 24) |
3444                                 (((u64) cmd->cmnd[3]) << 16) |
3445                                 (((u64) cmd->cmnd[4]) << 8) |
3446                                         cmd->cmnd[5];
3447                         first_block = (first_block * map->volume_blk_size)/512;
3448                         cp->tweak_lower = (u32)first_block;
3449                         cp->tweak_upper = (u32)(first_block >> 32);
3450                 }
3451                 break;
3452         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3453         case WRITE_12:
3454         case READ_12:
3455                 if (map->volume_blk_size == 512) {
3456                         cp->tweak_lower =
3457                                 (((u32) cmd->cmnd[2]) << 24) |
3458                                 (((u32) cmd->cmnd[3]) << 16) |
3459                                 (((u32) cmd->cmnd[4]) << 8) |
3460                                         cmd->cmnd[5];
3461                         cp->tweak_upper = 0;
3462                 } else {
3463                         first_block =
3464                                 (((u64) cmd->cmnd[2]) << 24) |
3465                                 (((u64) cmd->cmnd[3]) << 16) |
3466                                 (((u64) cmd->cmnd[4]) << 8) |
3467                                         cmd->cmnd[5];
3468                         first_block = (first_block * map->volume_blk_size)/512;
3469                         cp->tweak_lower = (u32)first_block;
3470                         cp->tweak_upper = (u32)(first_block >> 32);
3471                 }
3472                 break;
3473         case WRITE_16:
3474         case READ_16:
3475                 if (map->volume_blk_size == 512) {
3476                         cp->tweak_lower =
3477                                 (((u32) cmd->cmnd[6]) << 24) |
3478                                 (((u32) cmd->cmnd[7]) << 16) |
3479                                 (((u32) cmd->cmnd[8]) << 8) |
3480                                         cmd->cmnd[9];
3481                         cp->tweak_upper =
3482                                 (((u32) cmd->cmnd[2]) << 24) |
3483                                 (((u32) cmd->cmnd[3]) << 16) |
3484                                 (((u32) cmd->cmnd[4]) << 8) |
3485                                         cmd->cmnd[5];
3486                 } else {
3487                         first_block =
3488                                 (((u64) cmd->cmnd[2]) << 56) |
3489                                 (((u64) cmd->cmnd[3]) << 48) |
3490                                 (((u64) cmd->cmnd[4]) << 40) |
3491                                 (((u64) cmd->cmnd[5]) << 32) |
3492                                 (((u64) cmd->cmnd[6]) << 24) |
3493                                 (((u64) cmd->cmnd[7]) << 16) |
3494                                 (((u64) cmd->cmnd[8]) << 8) |
3495                                         cmd->cmnd[9];
3496                         first_block = (first_block * map->volume_blk_size)/512;
3497                         cp->tweak_lower = (u32)first_block;
3498                         cp->tweak_upper = (u32)(first_block >> 32);
3499                 }
3500                 break;
3501         default:
3502                 dev_err(&h->pdev->dev,
3503                         "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3504                         __func__);
3505                 BUG();
3506                 break;
3507         }
3508 }
3509
3510 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3511         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3512         u8 *scsi3addr)
3513 {
3514         struct scsi_cmnd *cmd = c->scsi_cmd;
3515         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3516         struct ioaccel2_sg_element *curr_sg;
3517         int use_sg, i;
3518         struct scatterlist *sg;
3519         u64 addr64;
3520         u32 len;
3521         u32 total_len = 0;
3522
3523         if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3524                 return IO_ACCEL_INELIGIBLE;
3525
3526         if (fixup_ioaccel_cdb(cdb, &cdb_len))
3527                 return IO_ACCEL_INELIGIBLE;
3528         c->cmd_type = CMD_IOACCEL2;
3529         /* Adjust the DMA address to point to the accelerated command buffer */
3530         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3531                                 (c->cmdindex * sizeof(*cp));
3532         BUG_ON(c->busaddr & 0x0000007F);
3533
3534         memset(cp, 0, sizeof(*cp));
3535         cp->IU_type = IOACCEL2_IU_TYPE;
3536
3537         use_sg = scsi_dma_map(cmd);
3538         if (use_sg < 0)
3539                 return use_sg;
3540
3541         if (use_sg) {
3542                 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3543                 curr_sg = cp->sg;
3544                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3545                         addr64 = (u64) sg_dma_address(sg);
3546                         len  = sg_dma_len(sg);
3547                         total_len += len;
3548                         curr_sg->address = cpu_to_le64(addr64);
3549                         curr_sg->length = cpu_to_le32(len);
3550                         curr_sg->reserved[0] = 0;
3551                         curr_sg->reserved[1] = 0;
3552                         curr_sg->reserved[2] = 0;
3553                         curr_sg->chain_indicator = 0;
3554                         curr_sg++;
3555                 }
3556
3557                 switch (cmd->sc_data_direction) {
3558                 case DMA_TO_DEVICE:
3559                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3560                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
3561                         break;
3562                 case DMA_FROM_DEVICE:
3563                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3564                         cp->direction |= IOACCEL2_DIR_DATA_IN;
3565                         break;
3566                 case DMA_NONE:
3567                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3568                         cp->direction |= IOACCEL2_DIR_NO_DATA;
3569                         break;
3570                 default:
3571                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3572                                 cmd->sc_data_direction);
3573                         BUG();
3574                         break;
3575                 }
3576         } else {
3577                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3578                 cp->direction |= IOACCEL2_DIR_NO_DATA;
3579         }
3580
3581         /* Set encryption parameters, if necessary */
3582         set_encrypt_ioaccel2(h, c, cp);
3583
3584         cp->scsi_nexus = ioaccel_handle;
3585         cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
3586                                 DIRECT_LOOKUP_BIT;
3587         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3588
3589         /* fill in sg elements */
3590         cp->sg_count = (u8) use_sg;
3591
3592         cp->data_len = cpu_to_le32(total_len);
3593         cp->err_ptr = cpu_to_le64(c->busaddr +
3594                         offsetof(struct io_accel2_cmd, error_data));
3595         cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
3596
3597         enqueue_cmd_and_start_io(h, c);
3598         return 0;
3599 }
3600
3601 /*
3602  * Queue a command to the correct I/O accelerator path.
3603  */
3604 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3605         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3606         u8 *scsi3addr)
3607 {
3608         if (h->transMethod & CFGTBL_Trans_io_accel1)
3609                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3610                                                 cdb, cdb_len, scsi3addr);
3611         else
3612                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3613                                                 cdb, cdb_len, scsi3addr);
3614 }
3615
3616 static void raid_map_helper(struct raid_map_data *map,
3617                 int offload_to_mirror, u32 *map_index, u32 *current_group)
3618 {
3619         if (offload_to_mirror == 0)  {
3620                 /* use physical disk in the first mirrored group. */
3621                 *map_index %= map->data_disks_per_row;
3622                 return;
3623         }
3624         do {
3625                 /* determine mirror group that *map_index indicates */
3626                 *current_group = *map_index / map->data_disks_per_row;
3627                 if (offload_to_mirror == *current_group)
3628                         continue;
3629                 if (*current_group < (map->layout_map_count - 1)) {
3630                         /* select map index from next group */
3631                         *map_index += map->data_disks_per_row;
3632                         (*current_group)++;
3633                 } else {
3634                         /* select map index from first group */
3635                         *map_index %= map->data_disks_per_row;
3636                         *current_group = 0;
3637                 }
3638         } while (offload_to_mirror != *current_group);
3639 }
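
/*
 * A worked example of the rotation above (geometry invented for
 * illustration): with data_disks_per_row = 2 and layout_map_count = 3
 * (triple mirror), map_index = 1 and offload_to_mirror = 2 resolve as
 * follows: group 0 is skipped (map_index becomes 3, group 1), group 1
 * is skipped (map_index becomes 5, group 2), and the loop exits, so
 * the I/O goes to entry 5: the second data disk's copy in mirror
 * group 2.
 */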
3640
3641 /*
3642  * Attempt to perform offload RAID mapping for a logical volume I/O.
3643  */
3644 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3645         struct CommandList *c)
3646 {
3647         struct scsi_cmnd *cmd = c->scsi_cmd;
3648         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3649         struct raid_map_data *map = &dev->raid_map;
3650         struct raid_map_disk_data *dd = &map->data[0];
3651         int is_write = 0;
3652         u32 map_index;
3653         u64 first_block, last_block;
3654         u32 block_cnt;
3655         u32 blocks_per_row;
3656         u64 first_row, last_row;
3657         u32 first_row_offset, last_row_offset;
3658         u32 first_column, last_column;
3659         u64 r0_first_row, r0_last_row;
3660         u32 r5or6_blocks_per_row;
3661         u64 r5or6_first_row, r5or6_last_row;
3662         u32 r5or6_first_row_offset, r5or6_last_row_offset;
3663         u32 r5or6_first_column, r5or6_last_column;
3664         u32 total_disks_per_row;
3665         u32 stripesize;
3666         u32 first_group, last_group, current_group;
3667         u32 map_row;
3668         u32 disk_handle;
3669         u64 disk_block;
3670         u32 disk_block_cnt;
3671         u8 cdb[16];
3672         u8 cdb_len;
3673 #if BITS_PER_LONG == 32
3674         u64 tmpdiv;
3675 #endif
3676         int offload_to_mirror;
3677
3678         BUG_ON(!(dev->offload_config && dev->offload_enabled));
3679
3680         /* check for valid opcode, get LBA and block count */
3681         switch (cmd->cmnd[0]) {
3682         case WRITE_6:
3683                 is_write = 1;
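                /* fall through */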
3684         case READ_6:
3685                 first_block =
                        (((u64) (cmd->cmnd[1] & 0x1f)) << 16) |
3686                         (((u64) cmd->cmnd[2]) << 8) |
3687                         cmd->cmnd[3];
3688                 block_cnt = cmd->cmnd[4];
                if (block_cnt == 0)
                        block_cnt = 256;        /* 6-byte CDB: 0 means 256 blocks */
3689                 break;
3690         case WRITE_10:
3691                 is_write = 1;
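                /* fall through */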
3692         case READ_10:
3693                 first_block =
3694                         (((u64) cmd->cmnd[2]) << 24) |
3695                         (((u64) cmd->cmnd[3]) << 16) |
3696                         (((u64) cmd->cmnd[4]) << 8) |
3697                         cmd->cmnd[5];
3698                 block_cnt =
3699                         (((u32) cmd->cmnd[7]) << 8) |
3700                         cmd->cmnd[8];
3701                 break;
3702         case WRITE_12:
3703                 is_write = 1;
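                /* fall through */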
3704         case READ_12:
3705                 first_block =
3706                         (((u64) cmd->cmnd[2]) << 24) |
3707                         (((u64) cmd->cmnd[3]) << 16) |
3708                         (((u64) cmd->cmnd[4]) << 8) |
3709                         cmd->cmnd[5];
3710                 block_cnt =
3711                         (((u32) cmd->cmnd[6]) << 24) |
3712                         (((u32) cmd->cmnd[7]) << 16) |
3713                         (((u32) cmd->cmnd[8]) << 8) |
3714                         cmd->cmnd[9];
3715                 break;
3716         case WRITE_16:
3717                 is_write = 1;
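                /* fall through */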
3718         case READ_16:
3719                 first_block =
3720                         (((u64) cmd->cmnd[2]) << 56) |
3721                         (((u64) cmd->cmnd[3]) << 48) |
3722                         (((u64) cmd->cmnd[4]) << 40) |
3723                         (((u64) cmd->cmnd[5]) << 32) |
3724                         (((u64) cmd->cmnd[6]) << 24) |
3725                         (((u64) cmd->cmnd[7]) << 16) |
3726                         (((u64) cmd->cmnd[8]) << 8) |
3727                         cmd->cmnd[9];
3728                 block_cnt =
3729                         (((u32) cmd->cmnd[10]) << 24) |
3730                         (((u32) cmd->cmnd[11]) << 16) |
3731                         (((u32) cmd->cmnd[12]) << 8) |
3732                         cmd->cmnd[13];
3733                 break;
3734         default:
3735                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3736         }
3737         BUG_ON(block_cnt == 0);
3738         last_block = first_block + block_cnt - 1;
3739
3740         /* check for write to non-RAID-0 */
3741         if (is_write && dev->raid_level != 0)
3742                 return IO_ACCEL_INELIGIBLE;
3743
3744         /* check for invalid block or wraparound */
3745         if (last_block >= map->volume_blk_cnt || last_block < first_block)
3746                 return IO_ACCEL_INELIGIBLE;
3747
3748         /* calculate stripe information for the request */
3749         blocks_per_row = map->data_disks_per_row * map->strip_size;
3750 #if BITS_PER_LONG == 32
3751         tmpdiv = first_block;
3752         (void) do_div(tmpdiv, blocks_per_row);
3753         first_row = tmpdiv;
3754         tmpdiv = last_block;
3755         (void) do_div(tmpdiv, blocks_per_row);
3756         last_row = tmpdiv;
3757         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3758         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3759         tmpdiv = first_row_offset;
3760         (void) do_div(tmpdiv,  map->strip_size);
3761         first_column = tmpdiv;
3762         tmpdiv = last_row_offset;
3763         (void) do_div(tmpdiv, map->strip_size);
3764         last_column = tmpdiv;
3765 #else
3766         first_row = first_block / blocks_per_row;
3767         last_row = last_block / blocks_per_row;
3768         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3769         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3770         first_column = first_row_offset / map->strip_size;
3771         last_column = last_row_offset / map->strip_size;
3772 #endif
3773
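        /*
         * Worked example (geometry invented for illustration): with
         * strip_size = 128 and data_disks_per_row = 3, blocks_per_row
         * is 384.  An I/O at LBA 1000 yields first_row = 1000 / 384 = 2,
         * first_row_offset = 1000 - 2 * 384 = 232, and first_column =
         * 232 / 128 = 1, i.e. the second data disk of stripe row 2.
         */
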
3774         /* if this isn't a single row/column then give to the controller */
3775         if ((first_row != last_row) || (first_column != last_column))
3776                 return IO_ACCEL_INELIGIBLE;
3777
3778         /* proceeding with driver mapping */
3779         total_disks_per_row = map->data_disks_per_row +
3780                                 map->metadata_disks_per_row;
3781         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3782                                 map->row_cnt;
3783         map_index = (map_row * total_disks_per_row) + first_column;
3784
3785         switch (dev->raid_level) {
3786         case HPSA_RAID_0:
3787                 break; /* nothing special to do */
3788         case HPSA_RAID_1:
3789                 /* Handles load balance across RAID 1 members.
3790                  * (2-drive R1 and R10 with even # of drives.)
3791                  * Appropriate for SSDs, not optimal for HDDs
3792                  */
3793                 BUG_ON(map->layout_map_count != 2);
3794                 if (dev->offload_to_mirror)
3795                         map_index += map->data_disks_per_row;
3796                 dev->offload_to_mirror = !dev->offload_to_mirror;
3797                 break;
3798         case HPSA_RAID_ADM:
3799                 /* Handles N-way mirrors  (R1-ADM)
3800                  * and R10 with # of drives divisible by 3.
3801                  */
3802                 BUG_ON(map->layout_map_count != 3);
3803
3804                 offload_to_mirror = dev->offload_to_mirror;
3805                 raid_map_helper(map, offload_to_mirror,
3806                                 &map_index, &current_group);
3807                 /* set mirror group to use next time */
3808                 offload_to_mirror =
3809                         (offload_to_mirror >= map->layout_map_count - 1)
3810                         ? 0 : offload_to_mirror + 1;
3811                 /* FIXME: remove after debug/dev */
3812                 BUG_ON(offload_to_mirror >= map->layout_map_count);
                if (h->raid_offload_debug > 0)
3813                         dev_warn(&h->pdev->dev,
3814                                 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3815                                 map_index, offload_to_mirror);
3816                 dev->offload_to_mirror = offload_to_mirror;
3817                 /* Avoid direct use of dev->offload_to_mirror within this
3818                  * function since multiple threads might simultaneously
3819                  * increment it beyond the range of map->layout_map_count - 1.
3820                  */
3821                 break;
3822         case HPSA_RAID_5:
3823         case HPSA_RAID_6:
3824                 if (map->layout_map_count <= 1)
3825                         break;
3826
3827                 /* Verify first and last block are in same RAID group */
3828                 r5or6_blocks_per_row =
3829                         map->strip_size * map->data_disks_per_row;
3830                 BUG_ON(r5or6_blocks_per_row == 0);
3831                 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3832 #if BITS_PER_LONG == 32
3833                 tmpdiv = first_block;
3834                 first_group = do_div(tmpdiv, stripesize);
3835                 tmpdiv = first_group;
3836                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3837                 first_group = tmpdiv;
3838                 tmpdiv = last_block;
3839                 last_group = do_div(tmpdiv, stripesize);
3840                 tmpdiv = last_group;
3841                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3842                 last_group = tmpdiv;
3843 #else
3844                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3845                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3846 #endif
3847                 if (first_group != last_group)
3848                         return IO_ACCEL_INELIGIBLE;
3849
3850                 /* Verify request is in a single row of RAID 5/6 */
3851 #if BITS_PER_LONG == 32
3852                 tmpdiv = first_block;
3853                 (void) do_div(tmpdiv, stripesize);
3854                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3855                 tmpdiv = last_block;
3856                 (void) do_div(tmpdiv, stripesize);
3857                 r5or6_last_row = r0_last_row = tmpdiv;
3858 #else
3859                 first_row = r5or6_first_row = r0_first_row =
3860                                                 first_block / stripesize;
3861                 r5or6_last_row = r0_last_row = last_block / stripesize;
3862 #endif
3863                 if (r5or6_first_row != r5or6_last_row)
3864                         return IO_ACCEL_INELIGIBLE;
3865
3866
3867                 /* Verify request is in a single column */
3868 #if BITS_PER_LONG == 32
3869                 tmpdiv = first_block;
3870                 first_row_offset = do_div(tmpdiv, stripesize);
3871                 tmpdiv = first_row_offset;
3872                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3873                 r5or6_first_row_offset = first_row_offset;
3874                 tmpdiv = last_block;
3875                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3876                 tmpdiv = r5or6_last_row_offset;
3877                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3878                 tmpdiv = r5or6_first_row_offset;
3879                 (void) do_div(tmpdiv, map->strip_size);
3880                 first_column = r5or6_first_column = tmpdiv;
3881                 tmpdiv = r5or6_last_row_offset;
3882                 (void) do_div(tmpdiv, map->strip_size);
3883                 r5or6_last_column = tmpdiv;
3884 #else
3885                 first_row_offset = r5or6_first_row_offset =
3886                         (u32)((first_block % stripesize) %
3887                                                 r5or6_blocks_per_row);
3888
3889                 r5or6_last_row_offset =
3890                         (u32)((last_block % stripesize) %
3891                                                 r5or6_blocks_per_row);
3892
3893                 first_column = r5or6_first_column =
3894                         r5or6_first_row_offset / map->strip_size;
3895                 r5or6_last_column =
3896                         r5or6_last_row_offset / map->strip_size;
3897 #endif
3898                 if (r5or6_first_column != r5or6_last_column)
3899                         return IO_ACCEL_INELIGIBLE;
3900
3901                 /* Request is eligible */
3902                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3903                         map->row_cnt;
3904
3905                 map_index = (first_group *
3906                         (map->row_cnt * total_disks_per_row)) +
3907                         (map_row * total_disks_per_row) + first_column;
3908                 break;
3909         default:
3910                 return IO_ACCEL_INELIGIBLE;
3911         }
3912
3913         disk_handle = dd[map_index].ioaccel_handle;
3914         disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3915                         (first_row_offset - (first_column * map->strip_size));
3916         disk_block_cnt = block_cnt;
3917
3918         /* handle differing logical/physical block sizes */
3919         if (map->phys_blk_shift) {
3920                 disk_block <<= map->phys_blk_shift;
3921                 disk_block_cnt <<= map->phys_blk_shift;
3922         }
3923         BUG_ON(disk_block_cnt > 0xffff);
3924
3925         /* build the new CDB for the physical disk I/O */
3926         if (disk_block > 0xffffffff) {
3927                 cdb[0] = is_write ? WRITE_16 : READ_16;
3928                 cdb[1] = 0;
3929                 cdb[2] = (u8) (disk_block >> 56);
3930                 cdb[3] = (u8) (disk_block >> 48);
3931                 cdb[4] = (u8) (disk_block >> 40);
3932                 cdb[5] = (u8) (disk_block >> 32);
3933                 cdb[6] = (u8) (disk_block >> 24);
3934                 cdb[7] = (u8) (disk_block >> 16);
3935                 cdb[8] = (u8) (disk_block >> 8);
3936                 cdb[9] = (u8) (disk_block);
3937                 cdb[10] = (u8) (disk_block_cnt >> 24);
3938                 cdb[11] = (u8) (disk_block_cnt >> 16);
3939                 cdb[12] = (u8) (disk_block_cnt >> 8);
3940                 cdb[13] = (u8) (disk_block_cnt);
3941                 cdb[14] = 0;
3942                 cdb[15] = 0;
3943                 cdb_len = 16;
3944         } else {
3945                 cdb[0] = is_write ? WRITE_10 : READ_10;
3946                 cdb[1] = 0;
3947                 cdb[2] = (u8) (disk_block >> 24);
3948                 cdb[3] = (u8) (disk_block >> 16);
3949                 cdb[4] = (u8) (disk_block >> 8);
3950                 cdb[5] = (u8) (disk_block);
3951                 cdb[6] = 0;
3952                 cdb[7] = (u8) (disk_block_cnt >> 8);
3953                 cdb[8] = (u8) (disk_block_cnt);
3954                 cdb[9] = 0;
3955                 cdb_len = 10;
3956         }
3957         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3958                                                 dev->scsi3addr);
3959 }
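/*
 * Editorial sketch (not built; a hypothetical userspace illustration of the
 * CDB encoding above): READ(10)/WRITE(10) carries a 32-bit LBA and a 16-bit
 * block count, READ(16)/WRITE(16) a 64-bit LBA and a 32-bit count, all
 * big-endian -- hence the byte-at-a-time shifts.  Assumes cdb[] was zeroed
 * by the caller, as the driver's else-branch implicitly does.
 */
#if 0
#include <stdint.h>

static int build_read_cdb(uint8_t cdb[16], uint64_t lba, uint32_t nblocks)
{
	int i;

	if (lba > 0xffffffffULL) {		/* needs READ(16) */
		cdb[0] = 0x88;			/* READ_16 opcode */
		for (i = 0; i < 8; i++)		/* bytes 2..9: 64-bit LBA */
			cdb[2 + i] = (uint8_t)(lba >> (8 * (7 - i)));
		for (i = 0; i < 4; i++)		/* bytes 10..13: 32-bit count */
			cdb[10 + i] = (uint8_t)(nblocks >> (8 * (3 - i)));
		return 16;
	}
	cdb[0] = 0x28;				/* READ_10 opcode */
	for (i = 0; i < 4; i++)			/* bytes 2..5: 32-bit LBA */
		cdb[2 + i] = (uint8_t)(lba >> (8 * (3 - i)));
	cdb[7] = (uint8_t)(nblocks >> 8);	/* bytes 7..8: 16-bit count */
	cdb[8] = (uint8_t)nblocks;
	return 10;
}
#endif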
3960
3961 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
3962         void (*done)(struct scsi_cmnd *))
3963 {
3964         struct ctlr_info *h;
3965         struct hpsa_scsi_dev_t *dev;
3966         unsigned char scsi3addr[8];
3967         struct CommandList *c;
3968         int rc = 0;
3969
3970         /* Get the ptr to our adapter structure out of cmd->host. */
3971         h = sdev_to_hba(cmd->device);
3972         dev = cmd->device->hostdata;
3973         if (!dev) {
3974                 cmd->result = DID_NO_CONNECT << 16;
3975                 done(cmd);
3976                 return 0;
3977         }
3978         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3979
3980         if (unlikely(lockup_detected(h))) {
3981                 cmd->result = DID_ERROR << 16;
3982                 done(cmd);
3983                 return 0;
3984         }
3985         c = cmd_alloc(h);
3986         if (c == NULL) {                        /* trouble... */
3987                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3988                 return SCSI_MLQUEUE_HOST_BUSY;
3989         }
3990
3991         /* Fill in the command list header */
3992
3993         cmd->scsi_done = done;    /* save this for use by completion code */
3994
3995         /* save c in case we have to abort it  */
3996         cmd->host_scribble = (unsigned char *) c;
3997
3998         c->cmd_type = CMD_SCSI;
3999         c->scsi_cmd = cmd;
4000
4001         /* Call alternate submit routine for I/O accelerated commands.
4002          * Retries always go down the normal I/O path.
4003          */
4004         if (likely(cmd->retries == 0 &&
4005                 cmd->request->cmd_type == REQ_TYPE_FS &&
4006                 h->acciopath_status)) {
4007                 if (dev->offload_enabled) {
4008                         rc = hpsa_scsi_ioaccel_raid_map(h, c);
4009                         if (rc == 0)
4010                                 return 0; /* Sent on ioaccel path */
4011                         if (rc < 0) {   /* scsi_dma_map failed. */
4012                                 cmd_free(h, c);
4013                                 return SCSI_MLQUEUE_HOST_BUSY;
4014                         }
4015                 } else if (dev->ioaccel_handle) {
4016                         rc = hpsa_scsi_ioaccel_direct_map(h, c);
4017                         if (rc == 0)
4018                                 return 0; /* Sent on direct map path */
4019                         if (rc < 0) {   /* scsi_dma_map failed. */
4020                                 cmd_free(h, c);
4021                                 return SCSI_MLQUEUE_HOST_BUSY;
4022                         }
4023                 }
4024         }
4025
4026         c->Header.ReplyQueue = 0;  /* unused in simple mode */
4027         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4028         c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
4029         c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
4030
4031         /* Fill in the request block... */
4032
4033         c->Request.Timeout = 0;
4034         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4035         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4036         c->Request.CDBLen = cmd->cmd_len;
4037         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4038         c->Request.Type.Type = TYPE_CMD;
4039         c->Request.Type.Attribute = ATTR_SIMPLE;
4040         switch (cmd->sc_data_direction) {
4041         case DMA_TO_DEVICE:
4042                 c->Request.Type.Direction = XFER_WRITE;
4043                 break;
4044         case DMA_FROM_DEVICE:
4045                 c->Request.Type.Direction = XFER_READ;
4046                 break;
4047         case DMA_NONE:
4048                 c->Request.Type.Direction = XFER_NONE;
4049                 break;
4050         case DMA_BIDIRECTIONAL:
4051                 /* This can happen if a buggy application does a scsi passthru
4052                  * and sets both inlen and outlen to non-zero. ( see
4053                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4054                  */
4055
4056                 c->Request.Type.Direction = XFER_RSVD;
4057                 /* This is technically wrong, and hpsa controllers should
4058                  * reject it with CMD_INVALID, which is the most correct
4059                  * response, but non-fibre backends appear to let it
4060                  * slide by, and give the same results as if this field
4061                  * were set correctly.  Either way is acceptable for
4062                  * our purposes here.
4063                  */
4064
4065                 break;
4066
4067         default:
4068                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4069                         cmd->sc_data_direction);
4070                 BUG();
4071                 break;
4072         }
4073
4074         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4075                 cmd_free(h, c);
4076                 return SCSI_MLQUEUE_HOST_BUSY;
4077         }
4078         enqueue_cmd_and_start_io(h, c);
4079         /* the cmd'll come back via intr handler in complete_scsi_command()  */
4080         return 0;
4081 }
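/*
 * Editorial note: the cmd->result assignments above use the SCSI midlayer
 * encoding, in which the host byte (DID_*) occupies bits 16..23 -- hence
 * DID_NO_CONNECT << 16 and DID_ERROR << 16 before calling done().
 */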
4082
4083 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
4084
4085 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4086 {
4087         unsigned long flags;
4088
4089         /*
4090          * Don't let rescans be initiated on a controller known
4091          * to be locked up.  If the controller locks up *during*
4092          * a rescan, that thread is probably hosed, but at least
4093          * we can prevent new rescan threads from piling up on a
4094          * locked up controller.
4095          */
4096         if (unlikely(lockup_detected(h))) {
4097                 spin_lock_irqsave(&h->scan_lock, flags);
4098                 h->scan_finished = 1;
4099                 wake_up_all(&h->scan_wait_queue);
4100                 spin_unlock_irqrestore(&h->scan_lock, flags);
4101                 return 1;
4102         }
4103         return 0;
4104 }
4105
4106 static void hpsa_scan_start(struct Scsi_Host *sh)
4107 {
4108         struct ctlr_info *h = shost_to_hba(sh);
4109         unsigned long flags;
4110
4111         if (do_not_scan_if_controller_locked_up(h))
4112                 return;
4113
4114         /* wait until any scan already in progress is finished. */
4115         while (1) {
4116                 spin_lock_irqsave(&h->scan_lock, flags);
4117                 if (h->scan_finished)
4118                         break;
4119                 spin_unlock_irqrestore(&h->scan_lock, flags);
4120                 wait_event(h->scan_wait_queue, h->scan_finished);
4121                 /* Note: We don't need to worry about a race between this
4122                  * thread and driver unload because the midlayer will
4123                  * have incremented the reference count, so unload won't
4124                  * happen if we're in here.
4125                  */
4126         }
4127         h->scan_finished = 0; /* mark scan as in progress */
4128         spin_unlock_irqrestore(&h->scan_lock, flags);
4129
4130         if (do_not_scan_if_controller_locked_up(h))
4131                 return;
4132
4133         hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4134
4135         spin_lock_irqsave(&h->scan_lock, flags);
4136         h->scan_finished = 1; /* mark scan as finished. */
4137         wake_up_all(&h->scan_wait_queue);
4138         spin_unlock_irqrestore(&h->scan_lock, flags);
4139 }
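/*
 * Editorial note: the wait loop above exits its while (1) via the break
 * with h->scan_lock still held, so the "mark scan as in progress" store
 * and the unlock that follow the loop both happen under the lock.
 */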
4140
4141 static int hpsa_scan_finished(struct Scsi_Host *sh,
4142         unsigned long elapsed_time)
4143 {
4144         struct ctlr_info *h = shost_to_hba(sh);
4145         unsigned long flags;
4146         int finished;
4147
4148         spin_lock_irqsave(&h->scan_lock, flags);
4149         finished = h->scan_finished;
4150         spin_unlock_irqrestore(&h->scan_lock, flags);
4151         return finished;
4152 }
4153
4154 static int hpsa_change_queue_depth(struct scsi_device *sdev,
4155         int qdepth, int reason)
4156 {
4157         struct ctlr_info *h = sdev_to_hba(sdev);
4158
4159         if (reason != SCSI_QDEPTH_DEFAULT)
4160                 return -ENOTSUPP;
4161
4162         if (qdepth < 1)
4163                 qdepth = 1;
4164         else
4165                 if (qdepth > h->nr_cmds)
4166                         qdepth = h->nr_cmds;
4167         scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4168         return sdev->queue_depth;
4169 }
4170
4171 static void hpsa_unregister_scsi(struct ctlr_info *h)
4172 {
4173         /* we are being forcibly unloaded, and may not refuse. */
4174         scsi_remove_host(h->scsi_host);
4175         scsi_host_put(h->scsi_host);
4176         h->scsi_host = NULL;
4177 }
4178
4179 static int hpsa_register_scsi(struct ctlr_info *h)
4180 {
4181         struct Scsi_Host *sh;
4182         int error;
4183
4184         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4185         if (sh == NULL)
4186                 goto fail;
4187
4188         sh->io_port = 0;
4189         sh->n_io_port = 0;
4190         sh->this_id = -1;
4191         sh->max_channel = 3;
4192         sh->max_cmd_len = MAX_COMMAND_SIZE;
4193         sh->max_lun = HPSA_MAX_LUN;
4194         sh->max_id = HPSA_MAX_LUN;
4195         sh->can_queue = h->nr_cmds;
4196         if (h->hba_mode_enabled)
4197                 sh->cmd_per_lun = 7;
4198         else
4199                 sh->cmd_per_lun = h->nr_cmds;
4200         sh->sg_tablesize = h->maxsgentries;
4201         h->scsi_host = sh;
4202         sh->hostdata[0] = (unsigned long) h;
4203         sh->irq = h->intr[h->intr_mode];
4204         sh->unique_id = sh->irq;
4205         error = scsi_add_host(sh, &h->pdev->dev);
4206         if (error)
4207                 goto fail_host_put;
4208         scsi_scan_host(sh);
4209         return 0;
4210
4211  fail_host_put:
4212         dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
4213                 __func__, h->ctlr);
4214         scsi_host_put(sh);
4215         return error;
4216  fail:
4217         dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
4218                 __func__, h->ctlr);
4219         return -ENOMEM;
4220 }
4221
4222 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4223         unsigned char lunaddr[])
4224 {
4225         int rc;
4226         int count = 0;
4227         int waittime = 1; /* seconds */
4228         struct CommandList *c;
4229
4230         c = cmd_special_alloc(h);
4231         if (!c) {
4232                 dev_warn(&h->pdev->dev,
4233                         "out of memory in wait_for_device_to_become_ready.\n");
4234                 return IO_ERROR;
4235         }
4236
4237         /* Send test unit ready until device ready, or give up. */
4238         while (count < HPSA_TUR_RETRY_LIMIT) {
4239
4240                 /* Wait for a bit.  do this first, because if we send
4241                  * the TUR right away, the reset will just abort it.
4242                  */
4243                 msleep(1000 * waittime);
4244                 count++;
4245                 rc = 0; /* Device ready. */
4246
4247                 /* Increase wait time with each try, up to a point. */
4248                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4249                         waittime = waittime * 2;
4250
4251                 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4252                 (void) fill_cmd(c, TEST_UNIT_READY, h,
4253                                 NULL, 0, 0, lunaddr, TYPE_CMD);
4254                 hpsa_scsi_do_simple_cmd_core(h, c);
4255                 /* no unmap needed here because no data xfer. */
4256
4257                 if (c->err_info->CommandStatus == CMD_SUCCESS)
4258                         break;
4259
4260                 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4261                         c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4262                         (c->err_info->SenseInfo[2] == NO_SENSE ||
4263                         c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4264                         break;
4265
4266                 dev_warn(&h->pdev->dev,
4267                         "waiting %d secs for device to become ready.\n", waittime);
4268                 rc = 1; /* device not ready. */
4269         }
4270
4271         if (rc)
4272                 dev_warn(&h->pdev->dev, "giving up on device.\n");
4273         else
4274                 dev_warn(&h->pdev->dev, "device is ready.\n");
4275
4276         cmd_special_free(h, c);
4277         return rc;
4278 }
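/*
 * Editorial sketch (not built): the retry loop above is a capped
 * exponential backoff -- sleep 1s, 2s, 4s, ... up to
 * HPSA_MAX_WAIT_INTERVAL_SECS between Test Unit Ready probes, for at most
 * HPSA_TUR_RETRY_LIMIT tries.  The shape of it, with hypothetical names:
 */
#if 0
#include <unistd.h>

static void capped_backoff(int retry_limit, int max_interval_secs,
			   int (*probe)(void))
{
	int count, waittime = 1;	/* seconds */

	for (count = 0; count < retry_limit; count++) {
		sleep(waittime);	/* wait first, then probe */
		if (waittime < max_interval_secs)
			waittime *= 2;
		if (probe() == 0)	/* 0 == device ready */
			return;
	}
}
#endif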
4279
4280 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4281  * complaining.  Doing a host- or bus-reset can't do anything good here.
4282  */
4283 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4284 {
4285         int rc;
4286         struct ctlr_info *h;
4287         struct hpsa_scsi_dev_t *dev;
4288
4289         /* find the controller to which the command to be aborted was sent */
4290         h = sdev_to_hba(scsicmd->device);
4291         if (h == NULL) /* paranoia */
4292                 return FAILED;
4293         dev = scsicmd->device->hostdata;
4294         if (!dev) {
4295                 dev_err(&h->pdev->dev,
4296                         "hpsa_eh_device_reset_handler: device lookup failed.\n");
4297                 return FAILED;
4298         }
4299         dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4300                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4301         /* send a reset to the SCSI LUN which the command was sent to */
4302         rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4303         if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4304                 return SUCCESS;
4305
4306         dev_warn(&h->pdev->dev, "resetting device failed.\n");
4307         return FAILED;
4308 }
4309
4310 static void swizzle_abort_tag(u8 *tag)
4311 {
4312         u8 original_tag[8];
4313
4314         memcpy(original_tag, tag, 8);
4315         tag[0] = original_tag[3];
4316         tag[1] = original_tag[2];
4317         tag[2] = original_tag[1];
4318         tag[3] = original_tag[0];
4319         tag[4] = original_tag[7];
4320         tag[5] = original_tag[6];
4321         tag[6] = original_tag[5];
4322         tag[7] = original_tag[4];
4323 }
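/*
 * Editorial sketch (not built): the swizzle above is an endianness swap of
 * each 32-bit half of the 8-byte tag, e.g. 01 02 03 04 05 06 07 08 becomes
 * 04 03 02 01 08 07 06 05.  A hypothetical equivalent using the GCC
 * byte-swap builtin:
 */
#if 0
#include <stdint.h>
#include <string.h>

static void swizzle_abort_tag_alt(uint8_t *tag)
{
	uint32_t lo, hi;

	memcpy(&lo, tag, 4);		/* reverse the bytes of each half */
	memcpy(&hi, tag + 4, 4);
	lo = __builtin_bswap32(lo);
	hi = __builtin_bswap32(hi);
	memcpy(tag, &lo, 4);
	memcpy(tag + 4, &hi, 4);
}
#endif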
4324
4325 static void hpsa_get_tag(struct ctlr_info *h,
4326         struct CommandList *c, u32 *taglower, u32 *tagupper)
4327 {
4328         if (c->cmd_type == CMD_IOACCEL1) {
4329                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4330                         &h->ioaccel_cmd_pool[c->cmdindex];
4331                 *tagupper = cm1->Tag.upper;
4332                 *taglower = cm1->Tag.lower;
4333                 return;
4334         }
4335         if (c->cmd_type == CMD_IOACCEL2) {
4336                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4337                         &h->ioaccel2_cmd_pool[c->cmdindex];
4338                 /* upper tag not used in ioaccel2 mode */
4339                 *tagupper = 0;
4340                 *taglower = cm2->Tag;
4341                 return;
4342         }
4343         *tagupper = c->Header.Tag.upper;
4344         *taglower = c->Header.Tag.lower;
4345 }
4346
4347
4348 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4349         struct CommandList *abort, int swizzle)
4350 {
4351         int rc = IO_OK;
4352         struct CommandList *c;
4353         struct ErrorInfo *ei;
4354         u32 tagupper, taglower;
4355
4356         c = cmd_special_alloc(h);
4357         if (c == NULL) {        /* trouble... */
4358                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4359                 return -ENOMEM;
4360         }
4361
4362         /* fill_cmd can't fail here, no buffer to map */
4363         (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4364                 0, 0, scsi3addr, TYPE_MSG);
4365         if (swizzle)
4366                 swizzle_abort_tag(&c->Request.CDB[4]);
4367         hpsa_scsi_do_simple_cmd_core(h, c);
4368         hpsa_get_tag(h, abort, &taglower, &tagupper);
4369         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4370                 __func__, tagupper, taglower);
4371         /* no unmap needed here because no data xfer. */
4372
4373         ei = c->err_info;
4374         switch (ei->CommandStatus) {
4375         case CMD_SUCCESS:
4376                 break;
4377         case CMD_UNABORTABLE: /* Very common, don't make noise. */
4378                 rc = -1;
4379                 break;
4380         default:
4381                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4382                         __func__, tagupper, taglower);
4383                 hpsa_scsi_interpret_error(h, c);
4384                 rc = -1;
4385                 break;
4386         }
4387         cmd_special_free(h, c);
4388         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4389                 __func__, tagupper, taglower);
4390         return rc;
4391 }
4392
4393 /*
4394  * hpsa_find_cmd_in_queue
4395  *
4396  * Used to determine whether a command (find) is still present
4397  * in queue_head.   Optionally excludes the last element of queue_head.
4398  *
4399  * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
4400  * not yet been submitted, and so can be aborted by the driver without
4401  * sending an abort to the hardware.
4402  *
4403  * Returns pointer to command if found in queue, NULL otherwise.
4404  */
4405 static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4406                         struct scsi_cmnd *find, struct list_head *queue_head)
4407 {
4408         unsigned long flags;
4409         struct CommandList *c = NULL;   /* ptr into queue_head */
4410
4411         if (!find)
4412                 return NULL;
4413         spin_lock_irqsave(&h->lock, flags);
4414         list_for_each_entry(c, queue_head, list) {
4415                 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
4416                         continue;
4417                 if (c->scsi_cmd == find) {
4418                         spin_unlock_irqrestore(&h->lock, flags);
4419                         return c;
4420                 }
4421         }
4422         spin_unlock_irqrestore(&h->lock, flags);
4423         return NULL;
4424 }
4425
4426 static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4427                                         u8 *tag, struct list_head *queue_head)
4428 {
4429         unsigned long flags;
4430         struct CommandList *c;
4431
4432         spin_lock_irqsave(&h->lock, flags);
4433         list_for_each_entry(c, queue_head, list) {
4434                 if (memcmp(&c->Header.Tag, tag, 8) != 0)
4435                         continue;
4436                 spin_unlock_irqrestore(&h->lock, flags);
4437                 return c;
4438         }
4439         spin_unlock_irqrestore(&h->lock, flags);
4440         return NULL;
4441 }
4442
4443 /* ioaccel2 path firmware cannot handle abort task requests.
4444  * Change abort requests to physical target reset, and send to the
4445  * address of the physical disk used for the ioaccel 2 command.
4446  * Return 0 on success (IO_OK)
4447  *       -1 on failure
4448  */
4449
4450 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4451         unsigned char *scsi3addr, struct CommandList *abort)
4452 {
4453         int rc = IO_OK;
4454         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4455         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4456         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4457         unsigned char *psa = &phys_scsi3addr[0];
4458
4459         /* Get a pointer to the hpsa logical device. */
4460         scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4461         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4462         if (dev == NULL) {
4463                 dev_warn(&h->pdev->dev,
4464                         "Cannot abort: no device pointer for command.\n");
4465                 return -1; /* not abortable */
4466         }
4467
4468         if (h->raid_offload_debug > 0)
4469                 dev_info(&h->pdev->dev,
4470                         "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4471                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4472                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4473                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4474
4475         if (!dev->offload_enabled) {
4476                 dev_warn(&h->pdev->dev,
4477                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4478                 return -1; /* not abortable */
4479         }
4480
4481         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4482         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4483                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4484                 return -1; /* not abortable */
4485         }
4486
4487         /* send the reset */
4488         if (h->raid_offload_debug > 0)
4489                 dev_info(&h->pdev->dev,
4490                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4491                         psa[0], psa[1], psa[2], psa[3],
4492                         psa[4], psa[5], psa[6], psa[7]);
4493         rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4494         if (rc != 0) {
4495                 dev_warn(&h->pdev->dev,
4496                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4497                         psa[0], psa[1], psa[2], psa[3],
4498                         psa[4], psa[5], psa[6], psa[7]);
4499                 return rc; /* failed to reset */
4500         }
4501
4502         /* wait for device to recover */
4503         if (wait_for_device_to_become_ready(h, psa) != 0) {
4504                 dev_warn(&h->pdev->dev,
4505                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4506                         psa[0], psa[1], psa[2], psa[3],
4507                         psa[4], psa[5], psa[6], psa[7]);
4508                 return -1;  /* failed to recover */
4509         }
4510
4511         /* device recovered */
4512         dev_info(&h->pdev->dev,
4513                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4514                 psa[0], psa[1], psa[2], psa[3],
4515                 psa[4], psa[5], psa[6], psa[7]);
4516
4517         return rc; /* success */
4518 }
4519
4520 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
4521  * tell which kind we're dealing with, so we send the abort both ways.  There
4522  * shouldn't be any collisions between swizzled and unswizzled tags due to the
4523  * way we construct our tags but we check anyway in case the assumptions which
4524  * make this true someday become false.
4525  */
4526 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4527         unsigned char *scsi3addr, struct CommandList *abort)
4528 {
4529         u8 swizzled_tag[8];
4530         struct CommandList *c;
4531         int rc = 0, rc2 = 0;
4532
4533         /* ioaccel mode 2 commands should be aborted via the
4534          * accelerated path, since RAID path is unaware of these commands,
4535          * but underlying firmware can't handle abort TMF.
4536          * Change abort to physical device reset.
4537          */
4538         if (abort->cmd_type == CMD_IOACCEL2)
4539                 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4540
4541         /* we do not expect to find the swizzled tag in our queue, but
4542          * check anyway just to be sure the assumptions which make this
4543          * the case haven't become wrong.
4544          */
4545         memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
4546         swizzle_abort_tag(swizzled_tag);
4547         c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
4548         if (c != NULL) {
4549                 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
4550                 return hpsa_send_abort(h, scsi3addr, abort, 0);
4551         }
4552         rc = hpsa_send_abort(h, scsi3addr, abort, 0);
4553
4554         /* if the command is still in our queue, we can't conclude that it was
4555          * aborted (it might have just completed normally) but in any case
4556          * we don't need to try to abort it another way.
4557          */
4558         c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
4559         if (c)
4560                 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
4561         return rc && rc2; /* report failure only if both abort attempts failed */
4562 }
4563
4564 /* Send an abort for the specified command.
4565  *      If the device and controller support it,
4566  *              send a task abort request.
4567  */
4568 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4569 {
4570
4571         int i, rc;
4572         struct ctlr_info *h;
4573         struct hpsa_scsi_dev_t *dev;
4574         struct CommandList *abort; /* pointer to command to be aborted */
4575         struct CommandList *found;
4576         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
4577         char msg[256];          /* For debug messaging. */
4578         int ml = 0;
4579         u32 tagupper, taglower;
4580
4581         /* Find the controller of the command to be aborted */
4582         h = sdev_to_hba(sc->device);
4583         if (WARN(h == NULL,
4584                         "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4585                 return FAILED;
4586
4587         /* Check that controller supports some kind of task abort */
4588         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4589                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4590                 return FAILED;
4591
4592         memset(msg, 0, sizeof(msg));
4593         ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
4594                 h->scsi_host->host_no, sc->device->channel,
4595                 sc->device->id, sc->device->lun);
4596
4597         /* Find the device of the command to be aborted */
4598         dev = sc->device->hostdata;
4599         if (!dev) {
4600                 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4601                                 msg);
4602                 return FAILED;
4603         }
4604
4605         /* Get SCSI command to be aborted */
4606         abort = (struct CommandList *) sc->host_scribble;
4607         if (abort == NULL) {
4608                 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4609                                 msg);
4610                 return FAILED;
4611         }
4612         hpsa_get_tag(h, abort, &taglower, &tagupper);
4613         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4614         as  = (struct scsi_cmnd *) abort->scsi_cmd;
4615         if (as != NULL)
4616                 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4617                         as->cmnd[0], as->serial_number);
4618         dev_dbg(&h->pdev->dev, "%s\n", msg);
4619         dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4620                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4621
4622         /* Search reqQ to see if command is queued but not submitted,
4623          * if so, complete the command with aborted status and remove
4624          * it from the reqQ.
4625          */
4626         found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
4627         if (found) {
4628                 found->err_info->CommandStatus = CMD_ABORTED;
4629                 finish_cmd(found);
4630                 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
4631                                 msg);
4632                 return SUCCESS;
4633         }
4634
4635         /* not in reqQ, if also not in cmpQ, must have already completed */
4636         found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4637         if (!found)  {
4638                 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
4639                                 msg);
4640                 return SUCCESS;
4641         }
4642
4643         /*
4644          * Command is in flight, or possibly already completed
4645          * by the firmware (but not to the scsi mid layer) but we can't
4646          * distinguish which.  Send the abort down.
4647          */
4648         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4649         if (rc != 0) {
4650                 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4651                 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4652                         h->scsi_host->host_no,
4653                         dev->bus, dev->target, dev->lun);
4654                 return FAILED;
4655         }
4656         dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4657
4658         /* If the abort(s) above completed and actually aborted the
4659          * command, then the command to be aborted should already be
4660          * completed.  If not, wait around a bit more to see if they
4661          * manage to complete normally.
4662          */
4663 #define ABORT_COMPLETE_WAIT_SECS 30
4664         for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4665                 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4666                 if (!found)
4667                         return SUCCESS;
4668                 msleep(100);
4669         }
4670         dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4671                 msg, ABORT_COMPLETE_WAIT_SECS);
4672         return FAILED;
4673 }
4674
4675
4676 /*
4677  * For operations that cannot sleep, a command block is allocated at init,
4678  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4679  * which ones are free or in use.  h->lock is taken internally, so the
4680  * caller must not already hold it.  cmd_free() is the complement.
4681  */
4682 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4683 {
4684         struct CommandList *c;
4685         int i;
4686         union u64bit temp64;
4687         dma_addr_t cmd_dma_handle, err_dma_handle;
4688         unsigned long flags;
4689
4690         spin_lock_irqsave(&h->lock, flags);
4691         do {
4692                 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
4693                 if (i == h->nr_cmds) {
4694                         spin_unlock_irqrestore(&h->lock, flags);
4695                         return NULL;
4696                 }
4697         } while (test_and_set_bit
4698                  (i & (BITS_PER_LONG - 1),
4699                   h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
4700         spin_unlock_irqrestore(&h->lock, flags);
4701
4702         c = h->cmd_pool + i;
4703         memset(c, 0, sizeof(*c));
4704         cmd_dma_handle = h->cmd_pool_dhandle
4705             + i * sizeof(*c);
4706         c->err_info = h->errinfo_pool + i;
4707         memset(c->err_info, 0, sizeof(*c->err_info));
4708         err_dma_handle = h->errinfo_pool_dhandle
4709             + i * sizeof(*c->err_info);
4710
4711         c->cmdindex = i;
4712
4713         INIT_LIST_HEAD(&c->list);
4714         c->busaddr = (u32) cmd_dma_handle;
4715         temp64.val = (u64) err_dma_handle;
4716         c->ErrDesc.Addr.lower = temp64.val32.lower;
4717         c->ErrDesc.Addr.upper = temp64.val32.upper;
4718         c->ErrDesc.Len = sizeof(*c->err_info);
4719
4720         c->h = h;
4721         return c;
4722 }
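/*
 * Editorial sketch (not built): cmd_alloc() is a plain bitmap allocator --
 * find the first clear bit, set it, and use the bit index to select a
 * preallocated slot.  A hypothetical single-threaded userspace version
 * (the driver does the test-and-set atomically under h->lock):
 */
#if 0
#include <limits.h>

#define NR_CMDS		64
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static unsigned long pool_bits[(NR_CMDS + BITS_PER_WORD - 1) / BITS_PER_WORD];

static int pool_alloc(void)	/* returns a free slot index, or -1 if full */
{
	int i;

	for (i = 0; i < NR_CMDS; i++) {
		unsigned long *word = &pool_bits[i / BITS_PER_WORD];
		unsigned long mask = 1UL << (i % BITS_PER_WORD);

		if (!(*word & mask)) {
			*word |= mask;	/* claim the slot */
			return i;
		}
	}
	return -1;
}

static void pool_free(int i)
{
	pool_bits[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
}
#endif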
4723
4724 /* For operations that can wait for kmalloc to possibly sleep,
4725  * this routine can be called. Lock need not be held to call
4726  * cmd_special_alloc. cmd_special_free() is the complement.
4727  */
4728 static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4729 {
4730         struct CommandList *c;
4731         union u64bit temp64;
4732         dma_addr_t cmd_dma_handle, err_dma_handle;
4733
4734         c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
4735         if (c == NULL)
4736                 return NULL;
4737         memset(c, 0, sizeof(*c));
4738
4739         c->cmd_type = CMD_SCSI;
4740         c->cmdindex = -1;
4741
4742         c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
4743                     &err_dma_handle);
4744
4745         if (c->err_info == NULL) {
4746                 pci_free_consistent(h->pdev,
4747                         sizeof(*c), c, cmd_dma_handle);
4748                 return NULL;
4749         }
4750         memset(c->err_info, 0, sizeof(*c->err_info));
4751
4752         INIT_LIST_HEAD(&c->list);
4753         c->busaddr = (u32) cmd_dma_handle;
4754         temp64.val = (u64) err_dma_handle;
4755         c->ErrDesc.Addr.lower = temp64.val32.lower;
4756         c->ErrDesc.Addr.upper = temp64.val32.upper;
4757         c->ErrDesc.Len = sizeof(*c->err_info);
4758
4759         c->h = h;
4760         return c;
4761 }
4762
4763 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4764 {
4765         int i;
4766         unsigned long flags;
4767
4768         i = c - h->cmd_pool;
4769         spin_lock_irqsave(&h->lock, flags);
4770         clear_bit(i & (BITS_PER_LONG - 1),
4771                   h->cmd_pool_bits + (i / BITS_PER_LONG));
4772         spin_unlock_irqrestore(&h->lock, flags);
4773 }
4774
4775 static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4776 {
4777         union u64bit temp64;
4778
4779         temp64.val32.lower = c->ErrDesc.Addr.lower;
4780         temp64.val32.upper = c->ErrDesc.Addr.upper;
4781         pci_free_consistent(h->pdev, sizeof(*c->err_info),
4782                             c->err_info, (dma_addr_t) temp64.val);
4783         pci_free_consistent(h->pdev, sizeof(*c),
4784                             c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
4785 }
4786
4787 #ifdef CONFIG_COMPAT
4788
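/*
 * Editorial note: the two shims below follow the usual compat_ioctl
 * pattern -- copy the 32-bit layout in field by field, widen the buffer
 * pointer with compat_ptr(), stage a native struct in user-accessible
 * memory via compat_alloc_user_space(), and reuse the regular
 * hpsa_ioctl() path.
 */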
4789 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
4790 {
4791         IOCTL32_Command_struct __user *arg32 =
4792             (IOCTL32_Command_struct __user *) arg;
4793         IOCTL_Command_struct arg64;
4794         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4795         int err;
4796         u32 cp;
4797
4798         memset(&arg64, 0, sizeof(arg64));
4799         err = 0;
4800         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4801                            sizeof(arg64.LUN_info));
4802         err |= copy_from_user(&arg64.Request, &arg32->Request,
4803                            sizeof(arg64.Request));
4804         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4805                            sizeof(arg64.error_info));
4806         err |= get_user(arg64.buf_size, &arg32->buf_size);
4807         err |= get_user(cp, &arg32->buf);
4808         arg64.buf = compat_ptr(cp);
4809         err |= copy_to_user(p, &arg64, sizeof(arg64));
4810
4811         if (err)
4812                 return -EFAULT;
4813
4814         err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
4815         if (err)
4816                 return err;
4817         err |= copy_in_user(&arg32->error_info, &p->error_info,
4818                          sizeof(arg32->error_info));
4819         if (err)
4820                 return -EFAULT;
4821         return err;
4822 }
4823
4824 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4825         int cmd, void *arg)
4826 {
4827         BIG_IOCTL32_Command_struct __user *arg32 =
4828             (BIG_IOCTL32_Command_struct __user *) arg;
4829         BIG_IOCTL_Command_struct arg64;
4830         BIG_IOCTL_Command_struct __user *p =
4831             compat_alloc_user_space(sizeof(arg64));
4832         int err;
4833         u32 cp;
4834
4835         memset(&arg64, 0, sizeof(arg64));
4836         err = 0;
4837         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4838                            sizeof(arg64.LUN_info));
4839         err |= copy_from_user(&arg64.Request, &arg32->Request,
4840                            sizeof(arg64.Request));
4841         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4842                            sizeof(arg64.error_info));
4843         err |= get_user(arg64.buf_size, &arg32->buf_size);
4844         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4845         err |= get_user(cp, &arg32->buf);
4846         arg64.buf = compat_ptr(cp);
4847         err |= copy_to_user(p, &arg64, sizeof(arg64));
4848
4849         if (err)
4850                 return -EFAULT;
4851
4852         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
4853         if (err)
4854                 return err;
4855         err |= copy_in_user(&arg32->error_info, &p->error_info,
4856                          sizeof(arg32->error_info));
4857         if (err)
4858                 return -EFAULT;
4859         return err;
4860 }
4861
4862 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
4863 {
4864         switch (cmd) {
4865         case CCISS_GETPCIINFO:
4866         case CCISS_GETINTINFO:
4867         case CCISS_SETINTINFO:
4868         case CCISS_GETNODENAME:
4869         case CCISS_SETNODENAME:
4870         case CCISS_GETHEARTBEAT:
4871         case CCISS_GETBUSTYPES:
4872         case CCISS_GETFIRMVER:
4873         case CCISS_GETDRIVVER:
4874         case CCISS_REVALIDVOLS:
4875         case CCISS_DEREGDISK:
4876         case CCISS_REGNEWDISK:
4877         case CCISS_REGNEWD:
4878         case CCISS_RESCANDISK:
4879         case CCISS_GETLUNINFO:
4880                 return hpsa_ioctl(dev, cmd, arg);
4881
4882         case CCISS_PASSTHRU32:
4883                 return hpsa_ioctl32_passthru(dev, cmd, arg);
4884         case CCISS_BIG_PASSTHRU32:
4885                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4886
4887         default:
4888                 return -ENOIOCTLCMD;
4889         }
4890 }
4891 #endif
4892
4893 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4894 {
4895         struct hpsa_pci_info pciinfo;
4896
4897         if (!argp)
4898                 return -EINVAL;
4899         pciinfo.domain = pci_domain_nr(h->pdev->bus);
4900         pciinfo.bus = h->pdev->bus->number;
4901         pciinfo.dev_fn = h->pdev->devfn;
4902         pciinfo.board_id = h->board_id;
4903         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4904                 return -EFAULT;
4905         return 0;
4906 }
4907
4908 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4909 {
4910         DriverVer_type DriverVer;
4911         unsigned char vmaj, vmin, vsubmin;
4912         int rc;
4913
4914         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4915                 &vmaj, &vmin, &vsubmin);
4916         if (rc != 3) {
4917                 dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.",
4918                         HPSA_DRIVER_VERSION);
4919                 vmaj = 0;
4920                 vmin = 0;
4921                 vsubmin = 0;
4922         }
4923         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4924         if (!argp)
4925                 return -EINVAL;
4926         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4927                 return -EFAULT;
4928         return 0;
4929 }
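/*
 * Editorial note: DriverVer packs one byte per component, so "3.4.4"
 * becomes (3 << 16) | (4 << 8) | 4 = 0x030404 (the "-1" suffix of
 * HPSA_DRIVER_VERSION is left unparsed by the sscanf above); userspace
 * unpacks it with (v >> 16) & 0xff, (v >> 8) & 0xff and v & 0xff.
 */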
4930
4931 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4932 {
4933         IOCTL_Command_struct iocommand;
4934         struct CommandList *c;
4935         char *buff = NULL;
4936         union u64bit temp64;
4937         int rc = 0;
4938
4939         if (!argp)
4940                 return -EINVAL;
4941         if (!capable(CAP_SYS_RAWIO))
4942                 return -EPERM;
4943         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4944                 return -EFAULT;
4945         if ((iocommand.buf_size < 1) &&
4946             (iocommand.Request.Type.Direction != XFER_NONE)) {
4947                 return -EINVAL;
4948         }
4949         if (iocommand.buf_size > 0) {
4950                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4951                 if (buff == NULL)
4952                         return -ENOMEM;
4953                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4954                         /* Copy the data into the buffer we created */
4955                         if (copy_from_user(buff, iocommand.buf,
4956                                 iocommand.buf_size)) {
4957                                 rc = -EFAULT;
4958                                 goto out_kfree;
4959                         }
4960                 } else {
4961                         memset(buff, 0, iocommand.buf_size);
4962                 }
4963         }
4964         c = cmd_special_alloc(h);
4965         if (c == NULL) {
4966                 rc = -ENOMEM;
4967                 goto out_kfree;
4968         }
4969         /* Fill in the command type */
4970         c->cmd_type = CMD_IOCTL_PEND;
4971         /* Fill in Command Header */
4972         c->Header.ReplyQueue = 0; /* unused in simple mode */
4973         if (iocommand.buf_size > 0) {   /* buffer to fill */
4974                 c->Header.SGList = 1;
4975                 c->Header.SGTotal = 1;
4976         } else  { /* no buffers to fill */
4977                 c->Header.SGList = 0;
4978                 c->Header.SGTotal = 0;
4979         }
4980         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4981         /* use the kernel address the cmd block for tag */
4982         c->Header.Tag.lower = c->busaddr;
4983
4984         /* Fill in Request block */
4985         memcpy(&c->Request, &iocommand.Request,
4986                 sizeof(c->Request));
4987
4988         /* Fill in the scatter gather information */
4989         if (iocommand.buf_size > 0) {
4990                 temp64.val = pci_map_single(h->pdev, buff,
4991                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4992                 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
4993                         c->SG[0].Addr.lower = 0;
4994                         c->SG[0].Addr.upper = 0;
4995                         c->SG[0].Len = 0;
4996                         rc = -ENOMEM;
4997                         goto out;
4998                 }
4999                 c->SG[0].Addr.lower = temp64.val32.lower;
5000                 c->SG[0].Addr.upper = temp64.val32.upper;
5001                 c->SG[0].Len = iocommand.buf_size;
5002                 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
5003         }
5004         hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5005         if (iocommand.buf_size > 0)
5006                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
5007         check_ioctl_unit_attention(h, c);
5008
5009         /* Copy the error information out */
5010         memcpy(&iocommand.error_info, c->err_info,
5011                 sizeof(iocommand.error_info));
5012         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
5013                 rc = -EFAULT;
5014                 goto out;
5015         }
5016         if ((iocommand.Request.Type.Direction & XFER_READ) &&
5017                 iocommand.buf_size > 0) {
5018                 /* Copy the data out of the buffer we created */
5019                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
5020                         rc = -EFAULT;
5021                         goto out;
5022                 }
5023         }
5024 out:
5025         cmd_special_free(h, c);
5026 out_kfree:
5027         kfree(buff);
5028         return rc;
5029 }
5030
5031 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5032 {
5033         BIG_IOCTL_Command_struct *ioc;
5034         struct CommandList *c;
5035         unsigned char **buff = NULL;
5036         int *buff_size = NULL;
5037         union u64bit temp64;
5038         BYTE sg_used = 0;
5039         int status = 0;
5040         int i;
5041         u32 left;
5042         u32 sz;
5043         BYTE __user *data_ptr;
5044
5045         if (!argp)
5046                 return -EINVAL;
5047         if (!capable(CAP_SYS_RAWIO))
5048                 return -EPERM;
5049         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
5051         if (!ioc) {
5052                 status = -ENOMEM;
5053                 goto cleanup1;
5054         }
5055         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5056                 status = -EFAULT;
5057                 goto cleanup1;
5058         }
5059         if ((ioc->buf_size < 1) &&
5060             (ioc->Request.Type.Direction != XFER_NONE)) {
5061                 status = -EINVAL;
5062                 goto cleanup1;
5063         }
5064         /* Check kmalloc limits  using all SGs */
5065         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5066                 status = -EINVAL;
5067                 goto cleanup1;
5068         }
5069         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
5070                 status = -EINVAL;
5071                 goto cleanup1;
5072         }
5073         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5074         if (!buff) {
5075                 status = -ENOMEM;
5076                 goto cleanup1;
5077         }
5078         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5079         if (!buff_size) {
5080                 status = -ENOMEM;
5081                 goto cleanup1;
5082         }
5083         left = ioc->buf_size;
5084         data_ptr = ioc->buf;
5085         while (left) {
5086                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5087                 buff_size[sg_used] = sz;
5088                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5089                 if (buff[sg_used] == NULL) {
5090                         status = -ENOMEM;
5091                         goto cleanup1;
5092                 }
5093                 if (ioc->Request.Type.Direction & XFER_WRITE) {
5094                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5095                                 status = -EFAULT;
5096                                 goto cleanup1;
5097                         }
5098                 } else
5099                         memset(buff[sg_used], 0, sz);
5100                 left -= sz;
5101                 data_ptr += sz;
5102                 sg_used++;
5103         }
5104         c = cmd_special_alloc(h);
5105         if (c == NULL) {
5106                 status = -ENOMEM;
5107                 goto cleanup1;
5108         }
5109         c->cmd_type = CMD_IOCTL_PEND;
5110         c->Header.ReplyQueue = 0;
5111         c->Header.SGList = c->Header.SGTotal = sg_used;
5112         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5113         c->Header.Tag.lower = c->busaddr;
5114         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5115         if (ioc->buf_size > 0) {
5117                 for (i = 0; i < sg_used; i++) {
5118                         temp64.val = pci_map_single(h->pdev, buff[i],
5119                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
5120                         if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
5121                                 c->SG[i].Addr.lower = 0;
5122                                 c->SG[i].Addr.upper = 0;
5123                                 c->SG[i].Len = 0;
5124                                 hpsa_pci_unmap(h->pdev, c, i,
5125                                         PCI_DMA_BIDIRECTIONAL);
5126                                 status = -ENOMEM;
5127                                 goto cleanup0;
5128                         }
5129                         c->SG[i].Addr.lower = temp64.val32.lower;
5130                         c->SG[i].Addr.upper = temp64.val32.upper;
5131                         c->SG[i].Len = buff_size[i];
5132                         c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
5133                 }
5134         }
5135         hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5136         if (sg_used)
5137                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5138         check_ioctl_unit_attention(h, c);
5139         /* Copy the error information out */
5140         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5141         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5142                 status = -EFAULT;
5143                 goto cleanup0;
5144         }
5145         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5146                 /* Copy the data out of the buffer we created */
5147                 BYTE __user *ptr = ioc->buf;
5148                 for (i = 0; i < sg_used; i++) {
5149                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
5150                                 status = -EFAULT;
5151                                 goto cleanup0;
5152                         }
5153                         ptr += buff_size[i];
5154                 }
5155         }
5156         status = 0;
5157 cleanup0:
5158         cmd_special_free(h, c);
5159 cleanup1:
5160         if (buff) {
5161                 for (i = 0; i < sg_used; i++)
5162                         kfree(buff[i]);
5163                 kfree(buff);
5164         }
5165         kfree(buff_size);
5166         kfree(ioc);
5167         return status;
5168 }
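/*
 * Editorial sketch (not built): the copy-in loop above is simple chunking
 * -- carve a buf_size transfer into at most SG_ENTRIES_IN_CMD pieces of no
 * more than malloc_size bytes each.  A hypothetical distilled version:
 */
#if 0
static int split_into_chunks(unsigned int total, unsigned int chunk_max,
			     unsigned int sizes[], unsigned int max_chunks)
{
	unsigned int n = 0;

	while (total) {
		if (n == max_chunks)
			return -1;	/* would exceed the SG entry limit */
		sizes[n] = total > chunk_max ? chunk_max : total;
		total -= sizes[n];
		n++;
	}
	return n;			/* number of chunks used */
}
#endif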
5169
5170 static void check_ioctl_unit_attention(struct ctlr_info *h,
5171         struct CommandList *c)
5172 {
5173         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5174                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5175                 (void) check_for_unit_attention(h, c);
5176 }
5177
5178 static int increment_passthru_count(struct ctlr_info *h)
5179 {
5180         unsigned long flags;
5181
5182         spin_lock_irqsave(&h->passthru_count_lock, flags);
5183         if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
5184                 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5185                 return -1;
5186         }
5187         h->passthru_count++;
5188         spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5189         return 0;
5190 }
5191
5192 static void decrement_passthru_count(struct ctlr_info *h)
5193 {
5194         unsigned long flags;
5195
5196         spin_lock_irqsave(&h->passthru_count_lock, flags);
5197         if (h->passthru_count <= 0) {
5198                 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5199                 /* not expecting to get here. */
5200                 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
5201                 return;
5202         }
5203         h->passthru_count--;
5204         spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5205 }
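/*
 * Editorial note: the two helpers above form a simple counting throttle --
 * at most HPSA_MAX_CONCURRENT_PASSTHRUS passthru ioctls may be in flight,
 * and a caller that loses the race gets -EAGAIN from hpsa_ioctl() rather
 * than blocking.
 */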
5206
5207 /*
5208  * ioctl
5209  */
5210 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
5211 {
5212         struct ctlr_info *h;
5213         void __user *argp = (void __user *)arg;
5214         int rc;
5215
5216         h = sdev_to_hba(dev);
5217
5218         switch (cmd) {
5219         case CCISS_DEREGDISK:
5220         case CCISS_REGNEWDISK:
5221         case CCISS_REGNEWD:
5222                 hpsa_scan_start(h->scsi_host);
5223                 return 0;
5224         case CCISS_GETPCIINFO:
5225                 return hpsa_getpciinfo_ioctl(h, argp);
5226         case CCISS_GETDRIVVER:
5227                 return hpsa_getdrivver_ioctl(h, argp);
5228         case CCISS_PASSTHRU:
5229                 if (increment_passthru_count(h))
5230                         return -EAGAIN;
5231                 rc = hpsa_passthru_ioctl(h, argp);
5232                 decrement_passthru_count(h);
5233                 return rc;
5234         case CCISS_BIG_PASSTHRU:
5235                 if (increment_passthru_count(h))
5236                         return -EAGAIN;
5237                 rc = hpsa_big_passthru_ioctl(h, argp);
5238                 decrement_passthru_count(h);
5239                 return rc;
5240         default:
5241                 return -ENOTTY;
5242         }
5243 }
5244
5245 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5246                                 u8 reset_type)
5247 {
5248         struct CommandList *c;
5249
5250         c = cmd_alloc(h);
5251         if (!c)
5252                 return -ENOMEM;
5253         /* fill_cmd can't fail here, no data buffer to map */
5254         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5255                 RAID_CTLR_LUNID, TYPE_MSG);
5256         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5257         c->waiting = NULL;
5258         enqueue_cmd_and_start_io(h, c);
5259         /* Don't wait for completion, the reset won't complete.  Don't free
5260          * the command either.  This is the last command we will send before
5261          * re-initializing everything, so it doesn't matter and won't leak.
5262          */
5263         return 0;
5264 }
5265
5266 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5267         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5268         int cmd_type)
5269 {
5270         int pci_dir = XFER_NONE;
5271         struct CommandList *a; /* for commands to be aborted */
5272
5273         c->cmd_type = CMD_IOCTL_PEND;
5274         c->Header.ReplyQueue = 0;
5275         if (buff != NULL && size > 0) {
5276                 c->Header.SGList = 1;
5277                 c->Header.SGTotal = 1;
5278         } else {
5279                 c->Header.SGList = 0;
5280                 c->Header.SGTotal = 0;
5281         }
5282         c->Header.Tag.lower = c->busaddr;
5283         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5284
5285         c->Request.Type.Type = cmd_type;
5286         if (cmd_type == TYPE_CMD) {
5287                 switch (cmd) {
5288                 case HPSA_INQUIRY:
5289                         /* are we trying to read a vital product page */
5290                         if (page_code & VPD_PAGE) {
5291                                 c->Request.CDB[1] = 0x01;
5292                                 c->Request.CDB[2] = (page_code & 0xff);
5293                         }
5294                         c->Request.CDBLen = 6;
5295                         c->Request.Type.Attribute = ATTR_SIMPLE;
5296                         c->Request.Type.Direction = XFER_READ;
5297                         c->Request.Timeout = 0;
5298                         c->Request.CDB[0] = HPSA_INQUIRY;
5299                         c->Request.CDB[4] = size & 0xFF;
5300                         break;
5301                 case HPSA_REPORT_LOG:
5302                 case HPSA_REPORT_PHYS:
5303                         /* Talking to the controller, so it's a physical
5304                            command: mode = 00, target = 0.  Nothing to write.
5305                          */
5306                         c->Request.CDBLen = 12;
5307                         c->Request.Type.Attribute = ATTR_SIMPLE;
5308                         c->Request.Type.Direction = XFER_READ;
5309                         c->Request.Timeout = 0;
5310                         c->Request.CDB[0] = cmd;
5311                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5312                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5313                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5314                         c->Request.CDB[9] = size & 0xFF;
5315                         break;
5316                 case HPSA_CACHE_FLUSH:
5317                         c->Request.CDBLen = 12;
5318                         c->Request.Type.Attribute = ATTR_SIMPLE;
5319                         c->Request.Type.Direction = XFER_WRITE;
5320                         c->Request.Timeout = 0;
5321                         c->Request.CDB[0] = BMIC_WRITE;
5322                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5323                         c->Request.CDB[7] = (size >> 8) & 0xFF;
5324                         c->Request.CDB[8] = size & 0xFF;
5325                         break;
5326                 case TEST_UNIT_READY:
5327                         c->Request.CDBLen = 6;
5328                         c->Request.Type.Attribute = ATTR_SIMPLE;
5329                         c->Request.Type.Direction = XFER_NONE;
5330                         c->Request.Timeout = 0;
5331                         break;
5332                 case HPSA_GET_RAID_MAP:
5333                         c->Request.CDBLen = 12;
5334                         c->Request.Type.Attribute = ATTR_SIMPLE;
5335                         c->Request.Type.Direction = XFER_READ;
5336                         c->Request.Timeout = 0;
5337                         c->Request.CDB[0] = HPSA_CISS_READ;
5338                         c->Request.CDB[1] = cmd;
5339                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5340                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5341                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5342                         c->Request.CDB[9] = size & 0xFF;
5343                         break;
5344                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5345                         c->Request.CDBLen = 10;
5346                         c->Request.Type.Attribute = ATTR_SIMPLE;
5347                         c->Request.Type.Direction = XFER_READ;
5348                         c->Request.Timeout = 0;
5349                         c->Request.CDB[0] = BMIC_READ;
5350                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5351                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5352                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5353                         break;
5354                 default:
5355                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5356                         BUG();
5357                         return -1;
5358                 }
5359         } else if (cmd_type == TYPE_MSG) {
5360                 switch (cmd) {
5361
5362                 case  HPSA_DEVICE_RESET_MSG:
5363                         c->Request.CDBLen = 16;
5364                         c->Request.Type.Type = TYPE_MSG; /* It is a MSG not a CMD */
5365                         c->Request.Type.Attribute = ATTR_SIMPLE;
5366                         c->Request.Type.Direction = XFER_NONE;
5367                         c->Request.Timeout = 0; /* Don't time out */
5368                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5369                         c->Request.CDB[0] =  cmd;
5370                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5371                         /* If bytes 4-7 are zero, it means reset the */
5372                         /* LunID device */
5373                         c->Request.CDB[4] = 0x00;
5374                         c->Request.CDB[5] = 0x00;
5375                         c->Request.CDB[6] = 0x00;
5376                         c->Request.CDB[7] = 0x00;
5377                         break;
5378                 case  HPSA_ABORT_MSG:
5379                         a = buff;       /* point to command to be aborted */
5380                         dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
5381                                 a->Header.Tag.upper, a->Header.Tag.lower,
5382                                 c->Header.Tag.upper, c->Header.Tag.lower);
5383                         c->Request.CDBLen = 16;
5384                         c->Request.Type.Type = TYPE_MSG;
5385                         c->Request.Type.Attribute = ATTR_SIMPLE;
5386                         c->Request.Type.Direction = XFER_WRITE;
5387                         c->Request.Timeout = 0; /* Don't time out */
5388                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5389                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5390                         c->Request.CDB[2] = 0x00; /* reserved */
5391                         c->Request.CDB[3] = 0x00; /* reserved */
5392                         /* Tag to abort goes in CDB[4]-CDB[11] */
5393                         c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
5394                         c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
5395                         c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
5396                         c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
5397                         c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
5398                         c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
5399                         c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
5400                         c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
5401                         c->Request.CDB[12] = 0x00; /* reserved */
5402                         c->Request.CDB[13] = 0x00; /* reserved */
5403                         c->Request.CDB[14] = 0x00; /* reserved */
5404                         c->Request.CDB[15] = 0x00; /* reserved */
5405                         break;
5406                 default:
5407                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
5408                                 cmd);
5409                         BUG();
5410                 }
5411         } else {
5412                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5413                 BUG();
5414         }
5415
5416         switch (c->Request.Type.Direction) {
5417         case XFER_READ:
5418                 pci_dir = PCI_DMA_FROMDEVICE;
5419                 break;
5420         case XFER_WRITE:
5421                 pci_dir = PCI_DMA_TODEVICE;
5422                 break;
5423         case XFER_NONE:
5424                 pci_dir = PCI_DMA_NONE;
5425                 break;
5426         default:
5427                 pci_dir = PCI_DMA_BIDIRECTIONAL;
5428         }
5429         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5430                 return -1;
5431         return 0;
5432 }
5433
5434 /*
5435  * Map (physical) PCI mem into (virtual) kernel space
5436  */
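     /* Rounds the base down to a page boundary, maps page_offs + size
      * bytes, and returns a pointer adjusted by the offset within that
      * page (or NULL if the remap fails).
      */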
5437 static void __iomem *remap_pci_mem(ulong base, ulong size)
5438 {
5439         ulong page_base = ((ulong) base) & PAGE_MASK;
5440         ulong page_offs = ((ulong) base) - page_base;
5441         void __iomem *page_remapped = ioremap_nocache(page_base,
5442                 page_offs + size);
5443
5444         return page_remapped ? (page_remapped + page_offs) : NULL;
5445 }
5446
5447 /* Takes cmds off the submission queue and sends them to the hardware,
5448  * then puts them on the queue of cmds waiting for completion.
5449  * Assumes h->lock is held
5450  */
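     /* Note: the lock is dropped around access.submit_command() and then
      * re-acquired, which is why the caller's irq flags are passed in by
      * reference.
      */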
5451 static void start_io(struct ctlr_info *h, unsigned long *flags)
5452 {
5453         struct CommandList *c;
5454
5455         while (!list_empty(&h->reqQ)) {
5456                 c = list_entry(h->reqQ.next, struct CommandList, list);
5457                 /* can't do anything if fifo is full */
5458                 if ((h->access.fifo_full(h))) {
5459                         h->fifo_recently_full = 1;
5460                         dev_warn(&h->pdev->dev, "fifo full\n");
5461                         break;
5462                 }
5463                 h->fifo_recently_full = 0;
5464
5465                 /* Get the first entry from the Request Q */
5466                 removeQ(c);
5467                 h->Qdepth--;
5468
5469                 /* Put job onto the completed Q */
5470                 addQ(&h->cmpQ, c);
5471
5472                 /* Must increment commands_outstanding before unlocking
5473                  * and submitting to avoid race checking for fifo full
5474                  * condition.
5475                  */
5476                 h->commands_outstanding++;
5477
5478                 /* Tell the controller to execute the command */
5479                 spin_unlock_irqrestore(&h->lock, *flags);
5480                 h->access.submit_command(h, c);
5481                 spin_lock_irqsave(&h->lock, *flags);
5482         }
5483 }
5484
5485 static void lock_and_start_io(struct ctlr_info *h)
5486 {
5487         unsigned long flags;
5488
5489         spin_lock_irqsave(&h->lock, flags);
5490         start_io(h, &flags);
5491         spin_unlock_irqrestore(&h->lock, flags);
5492 }
5493
5494 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5495 {
5496         return h->access.command_completed(h, q);
5497 }
5498
5499 static inline bool interrupt_pending(struct ctlr_info *h)
5500 {
5501         return h->access.intr_pending(h);
5502 }
5503
5504 static inline long interrupt_not_for_us(struct ctlr_info *h)
5505 {
5506         return (h->access.intr_pending(h) == 0) ||
5507                 (h->interrupts_enabled == 0);
5508 }
5509
5510 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5511         u32 raw_tag)
5512 {
5513         if (unlikely(tag_index >= h->nr_cmds)) {
5514                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5515                 return 1;
5516         }
5517         return 0;
5518 }
5519
5520 static inline void finish_cmd(struct CommandList *c)
5521 {
5522         unsigned long flags;
5523         int io_may_be_stalled = 0;
5524         struct ctlr_info *h = c->h;
5525
5526         spin_lock_irqsave(&h->lock, flags);
5527         removeQ(c);
5528
5529         /*
5530          * Check for possibly stalled i/o.
5531          *
5532          * If a fifo_full condition is encountered, requests will back up
5533          * in h->reqQ.  This queue is only emptied out by start_io which is
5534          * only called when a new i/o request comes in.  If no i/o's are
5535          * forthcoming, the i/o's in h->reqQ can get stuck.  So we call
5536          * start_io from here if we detect such a danger.
5537          *
5538          * Normally, we shouldn't hit this case, but pounding on the
5539          * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if
5540          * commands_outstanding is low.  We want to avoid calling
5541          * start_io from in here as much as possible, and esp. don't
5542          * want to get in a cycle where we call start_io every time
5543          * through here.
5544          */
5545         if (unlikely(h->fifo_recently_full) &&
5546                 h->commands_outstanding < 5)
5547                 io_may_be_stalled = 1;
5548
5549         spin_unlock_irqrestore(&h->lock, flags);
5550
5551         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5552         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5553                         || c->cmd_type == CMD_IOACCEL2))
5554                 complete_scsi_command(c);
5555         else if (c->cmd_type == CMD_IOCTL_PEND)
5556                 complete(c->waiting);
5557         if (unlikely(io_may_be_stalled))
5558                 lock_and_start_io(h);
5559 }
5560
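     /*
      * Tag helpers.  In performant mode the low DIRECT_LOOKUP_SHIFT bits
      * of a raw tag carry status/error flags and the upper bits encode the
      * command's index into h->cmd_pool.  Purely for illustration, if
      * DIRECT_LOOKUP_SHIFT were 5, a raw tag of 0x000000E8 would decode to
      * command index 7 (0xE8 >> 5).
      */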
5561 static inline u32 hpsa_tag_contains_index(u32 tag)
5562 {
5563         return tag & DIRECT_LOOKUP_BIT;
5564 }
5565
5566 static inline u32 hpsa_tag_to_index(u32 tag)
5567 {
5568         return tag >> DIRECT_LOOKUP_SHIFT;
5569 }
5570
5571
5572 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5573 {
5574 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5575 #define HPSA_SIMPLE_ERROR_BITS 0x03
5576         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5577                 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5578         return tag & ~HPSA_PERF_ERROR_BITS;
5579 }
5580
5581 /* process completion of an indexed ("direct lookup") command */
5582 static inline void process_indexed_cmd(struct ctlr_info *h,
5583         u32 raw_tag)
5584 {
5585         u32 tag_index;
5586         struct CommandList *c;
5587
5588         tag_index = hpsa_tag_to_index(raw_tag);
5589         if (!bad_tag(h, tag_index, raw_tag)) {
5590                 c = h->cmd_pool + tag_index;
5591                 finish_cmd(c);
5592         }
5593 }
5594
5595 /* process completion of a non-indexed command */
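     /* The low bits of both busaddr and the tag carry error/status flags,
      * so mask them off (0xFFFFFFE0) before comparing against each command
      * on the completion queue.
      */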
5596 static inline void process_nonindexed_cmd(struct ctlr_info *h,
5597         u32 raw_tag)
5598 {
5599         u32 tag;
5600         struct CommandList *c = NULL;
5601         unsigned long flags;
5602
5603         tag = hpsa_tag_discard_error_bits(h, raw_tag);
5604         spin_lock_irqsave(&h->lock, flags);
5605         list_for_each_entry(c, &h->cmpQ, list) {
5606                 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
5607                         spin_unlock_irqrestore(&h->lock, flags);
5608                         finish_cmd(c);
5609                         return;
5610                 }
5611         }
5612         spin_unlock_irqrestore(&h->lock, flags);
5613         bad_tag(h, h->nr_cmds + 1, raw_tag);
5614 }
5615
5616 /* Some controllers, like the P400, will give us one interrupt
5617  * after a soft reset, even if we turned interrupts off.
5618  * Only need to check for this in the hpsa_xxx_discard_completions
5619  * functions.
5620  */
5621 static int ignore_bogus_interrupt(struct ctlr_info *h)
5622 {
5623         if (likely(!reset_devices))
5624                 return 0;
5625
5626         if (likely(h->interrupts_enabled))
5627                 return 0;
5628
5629         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5630                 "(known firmware bug.)  Ignoring.\n");
5631
5632         return 1;
5633 }
5634
5635 /*
5636  * Convert &h->q[x] (passed to interrupt handlers) back to h.
5637  * Relies on (h->q[x] == x) being true for x such that
5638  * 0 <= x < MAX_REPLY_QUEUES.
5639  */
5640 static struct ctlr_info *queue_to_hba(u8 *queue)
5641 {
5642         return container_of((queue - *queue), struct ctlr_info, q[0]);
5643 }
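     /*
      * Example: hpsa_request_irq() below registers &h->q[2] as the dev_id
      * for reply queue 2.  Since h->q[2] == 2, (queue - *queue) lands on
      * &h->q[0], and container_of() recovers the enclosing ctlr_info.
      */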
5644
5645 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5646 {
5647         struct ctlr_info *h = queue_to_hba(queue);
5648         u8 q = *(u8 *) queue;
5649         u32 raw_tag;
5650
5651         if (ignore_bogus_interrupt(h))
5652                 return IRQ_NONE;
5653
5654         if (interrupt_not_for_us(h))
5655                 return IRQ_NONE;
5656         h->last_intr_timestamp = get_jiffies_64();
5657         while (interrupt_pending(h)) {
5658                 raw_tag = get_next_completion(h, q);
5659                 while (raw_tag != FIFO_EMPTY)
5660                         raw_tag = next_command(h, q);
5661         }
5662         return IRQ_HANDLED;
5663 }
5664
5665 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5666 {
5667         struct ctlr_info *h = queue_to_hba(queue);
5668         u32 raw_tag;
5669         u8 q = *(u8 *) queue;
5670
5671         if (ignore_bogus_interrupt(h))
5672                 return IRQ_NONE;
5673
5674         h->last_intr_timestamp = get_jiffies_64();
5675         raw_tag = get_next_completion(h, q);
5676         while (raw_tag != FIFO_EMPTY)
5677                 raw_tag = next_command(h, q);
5678         return IRQ_HANDLED;
5679 }
5680
5681 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5682 {
5683         struct ctlr_info *h = queue_to_hba((u8 *) queue);
5684         u32 raw_tag;
5685         u8 q = *(u8 *) queue;
5686
5687         if (interrupt_not_for_us(h))
5688                 return IRQ_NONE;
5689         h->last_intr_timestamp = get_jiffies_64();
5690         while (interrupt_pending(h)) {
5691                 raw_tag = get_next_completion(h, q);
5692                 while (raw_tag != FIFO_EMPTY) {
5693                         if (likely(hpsa_tag_contains_index(raw_tag)))
5694                                 process_indexed_cmd(h, raw_tag);
5695                         else
5696                                 process_nonindexed_cmd(h, raw_tag);
5697                         raw_tag = next_command(h, q);
5698                 }
5699         }
5700         return IRQ_HANDLED;
5701 }
5702
5703 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5704 {
5705         struct ctlr_info *h = queue_to_hba(queue);
5706         u32 raw_tag;
5707         u8 q = *(u8 *) queue;
5708
5709         h->last_intr_timestamp = get_jiffies_64();
5710         raw_tag = get_next_completion(h, q);
5711         while (raw_tag != FIFO_EMPTY) {
5712                 if (likely(hpsa_tag_contains_index(raw_tag)))
5713                         process_indexed_cmd(h, raw_tag);
5714                 else
5715                         process_nonindexed_cmd(h, raw_tag);
5716                 raw_tag = next_command(h, q);
5717         }
5718         return IRQ_HANDLED;
5719 }
5720
5721 /* Send a message CDB to the firmware. Careful, this only works
5722  * in simple mode, not performant mode due to the tag lookup.
5723  * We only ever use this immediately after a controller reset.
5724  */
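     /* The handshake: post the command's 32-bit physical address to the
      * inbound request port, then poll the outbound reply port until the
      * controller echoes a tag matching that address (the low
      * HPSA_SIMPLE_ERROR_BITS carry completion status).
      */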
5725 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5726                         unsigned char type)
5727 {
5728         struct Command {
5729                 struct CommandListHeader CommandHeader;
5730                 struct RequestBlock Request;
5731                 struct ErrDescriptor ErrorDescriptor;
5732         };
5733         struct Command *cmd;
5734         static const size_t cmd_sz = sizeof(*cmd) +
5735                                         sizeof(cmd->ErrorDescriptor);
5736         dma_addr_t paddr64;
5737         uint32_t paddr32, tag;
5738         void __iomem *vaddr;
5739         int i, err;
5740
5741         vaddr = pci_ioremap_bar(pdev, 0);
5742         if (vaddr == NULL)
5743                 return -ENOMEM;
5744
5745         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5746          * CCISS commands, so they must be allocated from the lower 4GiB of
5747          * memory.
5748          */
5749         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5750         if (err) {
5751                 iounmap(vaddr);
5752                 return -ENOMEM;
5753         }
5754
5755         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5756         if (cmd == NULL) {
5757                 iounmap(vaddr);
5758                 return -ENOMEM;
5759         }
5760
5761         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
5762          * although there's no guarantee, we assume that the address is at
5763          * least 4-byte aligned (most likely, it's page-aligned).
5764          */
5765         paddr32 = paddr64;
5766
5767         cmd->CommandHeader.ReplyQueue = 0;
5768         cmd->CommandHeader.SGList = 0;
5769         cmd->CommandHeader.SGTotal = 0;
5770         cmd->CommandHeader.Tag.lower = paddr32;
5771         cmd->CommandHeader.Tag.upper = 0;
5772         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5773
5774         cmd->Request.CDBLen = 16;
5775         cmd->Request.Type.Type = TYPE_MSG;
5776         cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
5777         cmd->Request.Type.Direction = XFER_NONE;
5778         cmd->Request.Timeout = 0; /* Don't time out */
5779         cmd->Request.CDB[0] = opcode;
5780         cmd->Request.CDB[1] = type;
5781         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5782         cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
5783         cmd->ErrorDescriptor.Addr.upper = 0;
5784         cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
5785
5786         writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5787
5788         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5789                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5790                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
5791                         break;
5792                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5793         }
5794
5795         iounmap(vaddr);
5796
5797         /* we leak the DMA buffer here ... no choice since the controller could
5798          *  still complete the command.
5799          */
5800         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5801                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5802                         opcode, type);
5803                 return -ETIMEDOUT;
5804         }
5805
5806         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5807
5808         if (tag & HPSA_ERROR_BIT) {
5809                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5810                         opcode, type);
5811                 return -EIO;
5812         }
5813
5814         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5815                 opcode, type);
5816         return 0;
5817 }
5818
5819 #define hpsa_noop(p) hpsa_message(p, 3, 0)
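     /* Message opcode 3, type 0 is a no-op; hpsa_init_reset_devices() uses
      * it to verify that the controller responds after a reset.
      */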
5820
5821 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5822         void __iomem *vaddr, u32 use_doorbell)
5823 {
5824         u16 pmcsr;
5825         int pos;
5826
5827         if (use_doorbell) {
5828                 /* For everything after the P600, the PCI power state method
5829                  * of resetting the controller doesn't work, so we have this
5830                  * other way using the doorbell register.
5831                  */
5832                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5833                 writel(use_doorbell, vaddr + SA5_DOORBELL);
5834
5835                 /* PMC hardware guys tell us we need a 10 second delay after
5836                  * doorbell reset and before any attempt to talk to the board
5837                  * at all to ensure that this actually works and doesn't fall
5838                  * over in some weird corner cases.
5839                  */
5840                 msleep(10000);
5841         } else { /* Try to do it the PCI power state way */
5842
5843                 /* Quoting from the Open CISS Specification: "The Power
5844                  * Management Control/Status Register (CSR) controls the power
5845                  * state of the device.  The normal operating state is D0,
5846                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
5847                  * the controller, place the interface device in D3 then to D0,
5848                  * this causes a secondary PCI reset which will reset the
5849                  * controller." */
5850
5851                 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
5852                 if (pos == 0) {
5853                         dev_err(&pdev->dev,
5854                                 "hpsa_reset_controller: "
5855                                 "PCI PM not supported\n");
5856                         return -ENODEV;
5857                 }
5858                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5859                 /* enter the D3hot power management state */
5860                 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
5861                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5862                 pmcsr |= PCI_D3hot;
5863                 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5864
5865                 msleep(500);
5866
5867                 /* enter the D0 power management state */
5868                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5869                 pmcsr |= PCI_D0;
5870                 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5871
5872                 /*
5873                  * The P600 requires a small delay when changing states.
5874                  * Otherwise we may think the board did not reset and we bail.
5875                  * This is for kdump only and is particular to the P600.
5876                  */
5877                 msleep(500);
5878         }
5879         return 0;
5880 }
5881
5882 static void init_driver_version(char *driver_version, int len)
5883 {
5884         memset(driver_version, 0, len);
5885         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5886 }
5887
5888 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5889 {
5890         char *driver_version;
5891         int i, size = sizeof(cfgtable->driver_version);
5892
5893         driver_version = kmalloc(size, GFP_KERNEL);
5894         if (!driver_version)
5895                 return -ENOMEM;
5896
5897         init_driver_version(driver_version, size);
5898         for (i = 0; i < size; i++)
5899                 writeb(driver_version[i], &cfgtable->driver_version[i]);
5900         kfree(driver_version);
5901         return 0;
5902 }
5903
5904 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5905                                           unsigned char *driver_ver)
5906 {
5907         int i;
5908
5909         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5910                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5911 }
5912
5913 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5914 {
5915
5916         char *driver_ver, *old_driver_ver;
5917         int rc, size = sizeof(cfgtable->driver_version);
5918
5919         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5920         if (!old_driver_ver)
5921                 return -ENOMEM;
5922         driver_ver = old_driver_ver + size;
5923
5924         /* After a reset, the 32 bytes of "driver version" in the cfgtable
5925          * should have been changed, otherwise we know the reset failed.
5926          */
5927         init_driver_version(old_driver_ver, size);
5928         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5929         rc = !memcmp(driver_ver, old_driver_ver, size);
5930         kfree(old_driver_ver);
5931         return rc;
5932 }
5933 /* This does a hard reset of the controller using PCI power management
5934  * states or the using the doorbell register.
5935  * states or the doorbell register.
5936 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5937 {
5938         u64 cfg_offset;
5939         u32 cfg_base_addr;
5940         u64 cfg_base_addr_index;
5941         void __iomem *vaddr;
5942         unsigned long paddr;
5943         u32 misc_fw_support;
5944         int rc;
5945         struct CfgTable __iomem *cfgtable;
5946         u32 use_doorbell;
5947         u32 board_id;
5948         u16 command_register;
5949
5950         /* For controllers as old as the P600, this is very nearly
5951          * the same thing as
5952          *
5953          * pci_save_state(pci_dev);
5954          * pci_set_power_state(pci_dev, PCI_D3hot);
5955          * pci_set_power_state(pci_dev, PCI_D0);
5956          * pci_restore_state(pci_dev);
5957          *
5958          * For controllers newer than the P600, the pci power state
5959          * method of resetting doesn't work so we have another way
5960          * using the doorbell register.
5961          */
5962
5963         rc = hpsa_lookup_board_id(pdev, &board_id);
5964         if (rc < 0 || !ctlr_is_resettable(board_id)) {
5965                 dev_warn(&pdev->dev, "Not resetting device.\n");
5966                 return -ENODEV;
5967         }
5968
5969         /* if controller is soft- but not hard resettable... */
5970         if (!ctlr_is_hard_resettable(board_id))
5971                 return -ENOTSUPP; /* try soft reset later. */
5972
5973         /* Save the PCI command register */
5974         pci_read_config_word(pdev, 4, &command_register);
5975         /* Turn the board off.  This is so that later pci_restore_state()
5976          * won't turn the board on before the rest of config space is ready.
5977          */
5978         pci_disable_device(pdev);
5979         pci_save_state(pdev);
5980
5981         /* find the first memory BAR, so we can find the cfg table */
5982         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5983         if (rc)
5984                 return rc;
5985         vaddr = remap_pci_mem(paddr, 0x250);
5986         if (!vaddr)
5987                 return -ENOMEM;
5988
5989         /* find cfgtable in order to check if reset via doorbell is supported */
5990         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5991                                         &cfg_base_addr_index, &cfg_offset);
5992         if (rc)
5993                 goto unmap_vaddr;
5994         cfgtable = remap_pci_mem(pci_resource_start(pdev,
5995                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5996         if (!cfgtable) {
5997                 rc = -ENOMEM;
5998                 goto unmap_vaddr;
5999         }
6000         rc = write_driver_ver_to_cfgtable(cfgtable);
6001         if (rc)
6002                 goto unmap_vaddr;
6003
6004         /* If reset via doorbell register is supported, use that.
6005          * There are two such methods.  Favor the newest method.
6006          */
6007         misc_fw_support = readl(&cfgtable->misc_fw_support);
6008         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6009         if (use_doorbell) {
6010                 use_doorbell = DOORBELL_CTLR_RESET2;
6011         } else {
6012                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6013                 if (use_doorbell) {
6014                         dev_warn(&pdev->dev, "Soft reset not supported. "
6015                                 "Firmware update is required.\n");
6016                         rc = -ENOTSUPP; /* try soft reset */
6017                         goto unmap_cfgtable;
6018                 }
6019         }
6020
6021         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6022         if (rc)
6023                 goto unmap_cfgtable;
6024
6025         pci_restore_state(pdev);
6026         rc = pci_enable_device(pdev);
6027         if (rc) {
6028                 dev_warn(&pdev->dev, "failed to enable device.\n");
6029                 goto unmap_cfgtable;
6030         }
6031         pci_write_config_word(pdev, 4, command_register);
6032
6033         /* Some devices (notably the HP Smart Array 5i Controller)
6034            need a little pause here */
6035         msleep(HPSA_POST_RESET_PAUSE_MSECS);
6036
6037         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6038         if (rc) {
6039                 dev_warn(&pdev->dev,
6040                         "failed waiting for board to become ready "
6041                         "after hard reset\n");
6042                 goto unmap_cfgtable;
6043         }
6044
6045         rc = controller_reset_failed(cfgtable);
6046         if (rc < 0)
6047                 goto unmap_cfgtable;
6048         if (rc) {
6049                 dev_warn(&pdev->dev, "Unable to successfully reset "
6050                         "controller. Will try soft reset.\n");
6051                 rc = -ENOTSUPP;
6052         } else {
6053                 dev_info(&pdev->dev, "board ready after hard reset.\n");
6054         }
6055
6056 unmap_cfgtable:
6057         iounmap(cfgtable);
6058
6059 unmap_vaddr:
6060         iounmap(vaddr);
6061         return rc;
6062 }
6063
6064 /*
6065  *  We cannot read the structure directly; for portability we must use
6066  *  the I/O accessor functions.
6067  *  This is for debug only.
6068  */
6069 static void print_cfg_table(struct device *dev, struct CfgTable *tb)
6070 {
6071 #ifdef HPSA_DEBUG
6072         int i;
6073         char temp_name[17];
6074
6075         dev_info(dev, "Controller Configuration information\n");
6076         dev_info(dev, "------------------------------------\n");
6077         for (i = 0; i < 4; i++)
6078                 temp_name[i] = readb(&(tb->Signature[i]));
6079         temp_name[4] = '\0';
6080         dev_info(dev, "   Signature = %s\n", temp_name);
6081         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
6082         dev_info(dev, "   Transport methods supported = 0x%x\n",
6083                readl(&(tb->TransportSupport)));
6084         dev_info(dev, "   Transport methods active = 0x%x\n",
6085                readl(&(tb->TransportActive)));
6086         dev_info(dev, "   Requested transport Method = 0x%x\n",
6087                readl(&(tb->HostWrite.TransportRequest)));
6088         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
6089                readl(&(tb->HostWrite.CoalIntDelay)));
6090         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
6091                readl(&(tb->HostWrite.CoalIntCount)));
6092         dev_info(dev, "   Max outstanding commands = %d\n",
6093                readl(&(tb->CmdsOutMax)));
6094         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6095         for (i = 0; i < 16; i++)
6096                 temp_name[i] = readb(&(tb->ServerName[i]));
6097         temp_name[16] = '\0';
6098         dev_info(dev, "   Server Name = %s\n", temp_name);
6099         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
6100                 readl(&(tb->HeartBeat)));
6101 #endif                          /* HPSA_DEBUG */
6102 }
6103
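     /*
      * Translate a legacy config-space BAR register offset (relative to
      * PCI_BASE_ADDRESS_0) into a PCI resource index.  An I/O or 32-bit
      * memory BAR occupies 4 bytes of config space while a 64-bit memory
      * BAR occupies 8, so walk the resources accumulating offsets until we
      * reach the requested one; returns -1 on failure.
      */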
6104 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6105 {
6106         int i, offset, mem_type, bar_type;
6107
6108         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6109                 return 0;
6110         offset = 0;
6111         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6112                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6113                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6114                         offset += 4;
6115                 else {
6116                         mem_type = pci_resource_flags(pdev, i) &
6117                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6118                         switch (mem_type) {
6119                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
6120                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6121                                 offset += 4;    /* 32 bit */
6122                                 break;
6123                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
6124                                 offset += 8;
6125                                 break;
6126                         default:        /* reserved in PCI 2.2 */
6127                                 dev_warn(&pdev->dev,
6128                                        "base address is invalid\n");
6129                                 return -1;
6131                         }
6132                 }
6133                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6134                         return i + 1;
6135         }
6136         return -1;
6137 }
6138
6139 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
6140  * controllers that are capable. If not, we use IO-APIC mode.
6141  */
6142
6143 static void hpsa_interrupt_mode(struct ctlr_info *h)
6144 {
6145 #ifdef CONFIG_PCI_MSI
6146         int err, i;
6147         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6148
6149         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6150                 hpsa_msix_entries[i].vector = 0;
6151                 hpsa_msix_entries[i].entry = i;
6152         }
6153
6154         /* Some boards advertise MSI but don't really support it */
6155         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6156             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
6157                 goto default_int_mode;
6158         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6159                 dev_info(&h->pdev->dev, "MSIX\n");
6160                 h->msix_vector = MAX_REPLY_QUEUES;
6161                 if (h->msix_vector > num_online_cpus())
6162                         h->msix_vector = num_online_cpus();
6163                 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6164                                       h->msix_vector);
6165                 if (err > 0) {
6166                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6167                                "available\n", err);
6168                         h->msix_vector = err;
6169                         err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6170                                               h->msix_vector);
6171                 }
6172                 if (!err) {
6173                         for (i = 0; i < h->msix_vector; i++)
6174                                 h->intr[i] = hpsa_msix_entries[i].vector;
6175                         return;
6176                 } else {
6177                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
6178                                err);
6179                         h->msix_vector = 0;
6180                         goto default_int_mode;
6181                 }
6182         }
6183         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6184                 dev_info(&h->pdev->dev, "MSI\n");
6185                 if (!pci_enable_msi(h->pdev))
6186                         h->msi_vector = 1;
6187                 else
6188                         dev_warn(&h->pdev->dev, "MSI init failed\n");
6189         }
6190 default_int_mode:
6191 #endif                          /* CONFIG_PCI_MSI */
6192         /* if we get here we're going to use the default interrupt mode */
6193         h->intr[h->intr_mode] = h->pdev->irq;
6194 }
6195
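     /*
      * The board id packs the PCI subsystem device id into the upper 16
      * bits and the subsystem vendor id into the lower 16.  For example,
      * subsystem vendor 0x103C with subsystem device 0x3225 yields board
      * id 0x3225103C, the P600 checked for in the prefetch quirk below.
      */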
6196 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
6197 {
6198         int i;
6199         u32 subsystem_vendor_id, subsystem_device_id;
6200
6201         subsystem_vendor_id = pdev->subsystem_vendor;
6202         subsystem_device_id = pdev->subsystem_device;
6203         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6204                     subsystem_vendor_id;
6205
6206         for (i = 0; i < ARRAY_SIZE(products); i++)
6207                 if (*board_id == products[i].board_id)
6208                         return i;
6209
6210         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6211                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6212                 !hpsa_allow_any) {
6213                 dev_warn(&pdev->dev, "unrecognized board ID: "
6214                         "0x%08x, ignoring.\n", *board_id);
6215                 return -ENODEV;
6216         }
6217         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6218 }
6219
6220 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6221                                     unsigned long *memory_bar)
6222 {
6223         int i;
6224
6225         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
6226                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
6227                         /* addressing mode bits already removed */
6228                         *memory_bar = pci_resource_start(pdev, i);
6229                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6230                                 *memory_bar);
6231                         return 0;
6232                 }
6233         dev_warn(&pdev->dev, "no memory BAR found\n");
6234         return -ENODEV;
6235 }
6236
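     /*
      * Poll the scratchpad register until the firmware reports the desired
      * state: it writes HPSA_FIRMWARE_READY there once it is up, and
      * waiting for "not ready" confirms that a reset actually took hold.
      */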
6237 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6238                                      int wait_for_ready)
6239 {
6240         int i, iterations;
6241         u32 scratchpad;
6242         if (wait_for_ready)
6243                 iterations = HPSA_BOARD_READY_ITERATIONS;
6244         else
6245                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6246
6247         for (i = 0; i < iterations; i++) {
6248                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6249                 if (wait_for_ready) {
6250                         if (scratchpad == HPSA_FIRMWARE_READY)
6251                                 return 0;
6252                 } else {
6253                         if (scratchpad != HPSA_FIRMWARE_READY)
6254                                 return 0;
6255                 }
6256                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6257         }
6258         dev_warn(&pdev->dev, "board not ready, timed out.\n");
6259         return -ENODEV;
6260 }
6261
6262 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6263                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6264                                u64 *cfg_offset)
6265 {
6266         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6267         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6268         *cfg_base_addr &= (u32) 0x0000ffff;
6269         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6270         if (*cfg_base_addr_index == -1) {
6271                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6272                 return -ENODEV;
6273         }
6274         return 0;
6275 }
6276
6277 static int hpsa_find_cfgtables(struct ctlr_info *h)
6278 {
6279         u64 cfg_offset;
6280         u32 cfg_base_addr;
6281         u64 cfg_base_addr_index;
6282         u32 trans_offset;
6283         int rc;
6284
6285         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6286                 &cfg_base_addr_index, &cfg_offset);
6287         if (rc)
6288                 return rc;
6289         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6290                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6291         if (!h->cfgtable)
6292                 return -ENOMEM;
6293         rc = write_driver_ver_to_cfgtable(h->cfgtable);
6294         if (rc)
6295                 return rc;
6296         /* Find performant mode table. */
6297         trans_offset = readl(&h->cfgtable->TransMethodOffset);
6298         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6299                                 cfg_base_addr_index)+cfg_offset+trans_offset,
6300                                 sizeof(*h->transtable));
6301         if (!h->transtable)
6302                 return -ENOMEM;
6303         return 0;
6304 }
6305
6306 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6307 {
6308         h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
6309
6310         /* Limit commands in memory limited kdump scenario. */
6311         if (reset_devices && h->max_commands > 32)
6312                 h->max_commands = 32;
6313
6314         if (h->max_commands < 16) {
6315                 dev_warn(&h->pdev->dev, "Controller reports "
6316                         "max supported commands of %d, an obvious lie. "
6317                         "Using 16.  Ensure that firmware is up to date.\n",
6318                         h->max_commands);
6319                 h->max_commands = 16;
6320         }
6321 }
6322
6323 /* Interrogate the hardware for some limits:
6324  * max commands, max SG elements without chaining, and with chaining,
6325  * SG chain block size, etc.
6326  */
6327 static void hpsa_find_board_params(struct ctlr_info *h)
6328 {
6329         hpsa_get_max_perf_mode_cmds(h);
6330         h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
6331         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6332         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6333         /*
6334          * Limit in-command s/g elements to 32 to save DMA'able memory.
6335          * However, the spec says if 0, use 31.
6336          */
6337         h->max_cmd_sg_entries = 31;
6338         if (h->maxsgentries > 512) {
6339                 h->max_cmd_sg_entries = 32;
6340                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
6341                 h->maxsgentries--; /* save one for chain pointer */
6342         } else {
6343                 h->maxsgentries = 31; /* default to traditional values */
6344                 h->chainsize = 0;
6345         }
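             /*
              * Example: a controller reporting 1024 SG elements ends up with
              * max_cmd_sg_entries = 32 (the last entry holds the chain-block
              * pointer), chainsize = 1024 - 32 + 1 = 993, and maxsgentries =
              * 1023 usable data elements per command.
              */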
6346
6347         /* Find out what task management functions are supported and cache */
6348         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6349         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6350                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6351         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6352                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
6353 }
6354
6355 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6356 {
6357         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6358                 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
6359                 return false;
6360         }
6361         return true;
6362 }
6363
6364 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6365 {
6366         u32 driver_support;
6367
6368         driver_support = readl(&(h->cfgtable->driver_support));
6369 #ifdef CONFIG_X86
6370         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6371         driver_support |= ENABLE_SCSI_PREFETCH;
6372 #endif
6373         driver_support |= ENABLE_UNIT_ATTN;
6374         writel(driver_support, &(h->cfgtable->driver_support));
6375 }
6376
6377 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
6378  * in a prefetch beyond physical memory.
6379  */
6380 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6381 {
6382         u32 dma_prefetch;
6383
6384         if (h->board_id != 0x3225103C)
6385                 return;
6386         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6387         dma_prefetch |= 0x8000;
6388         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6389 }
6390
6391 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6392 {
6393         int i;
6394         u32 doorbell_value;
6395         unsigned long flags;
6396         /* wait until the clear_event_notify bit 6 is cleared by controller. */
6397         for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6398                 spin_lock_irqsave(&h->lock, flags);
6399                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6400                 spin_unlock_irqrestore(&h->lock, flags);
6401                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6402                         break;
6403                 /* delay and try again */
6404                 msleep(20);
6405         }
6406 }
6407
6408 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6409 {
6410         int i;
6411         u32 doorbell_value;
6412         unsigned long flags;
6413
6414         /* under certain very rare conditions, this can take a while.
6415          * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6416          * as we enter this code.)
6417          */
6418         for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6419                 spin_lock_irqsave(&h->lock, flags);
6420                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6421                 spin_unlock_irqrestore(&h->lock, flags);
6422                 if (!(doorbell_value & CFGTBL_ChangeReq))
6423                         break;
6424                 /* delay and try again */
6425                 usleep_range(10000, 20000);
6426         }
6427 }
6428
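     /*
      * Switch the controller to simple (non-performant) transport: write
      * CFGTBL_Trans_Simple into the host-write area of the config table,
      * ring the doorbell, wait for the mode-change ack, and confirm the
      * switch via TransportActive.
      */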
6429 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6430 {
6431         u32 trans_support;
6432
6433         trans_support = readl(&(h->cfgtable->TransportSupport));
6434         if (!(trans_support & SIMPLE_MODE))
6435                 return -ENOTSUPP;
6436
6437         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6438
6439         /* Update the field, and then ring the doorbell */
6440         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6441         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6442         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6443         hpsa_wait_for_mode_change_ack(h);
6444         print_cfg_table(&h->pdev->dev, h->cfgtable);
6445         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6446                 goto error;
6447         h->transMethod = CFGTBL_Trans_Simple;
6448         return 0;
6449 error:
6450         dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
6451         return -ENODEV;
6452 }
6453
6454 static int hpsa_pci_init(struct ctlr_info *h)
6455 {
6456         int prod_index, err;
6457
6458         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6459         if (prod_index < 0)
6460                 return -ENODEV;
6461         h->product_name = products[prod_index].product_name;
6462         h->access = *(products[prod_index].access);
6463
6464         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6465                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6466
6467         err = pci_enable_device(h->pdev);
6468         if (err) {
6469                 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6470                 return err;
6471         }
6472
6473         /* Enable bus mastering (pci_disable_device may disable this) */
6474         pci_set_master(h->pdev);
6475
6476         err = pci_request_regions(h->pdev, HPSA);
6477         if (err) {
6478                 dev_err(&h->pdev->dev,
6479                         "cannot obtain PCI resources, aborting\n");
6480                 return err;
6481         }
6482         hpsa_interrupt_mode(h);
6483         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6484         if (err)
6485                 goto err_out_free_res;
6486         h->vaddr = remap_pci_mem(h->paddr, 0x250);
6487         if (!h->vaddr) {
6488                 err = -ENOMEM;
6489                 goto err_out_free_res;
6490         }
6491         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6492         if (err)
6493                 goto err_out_free_res;
6494         err = hpsa_find_cfgtables(h);
6495         if (err)
6496                 goto err_out_free_res;
6497         hpsa_find_board_params(h);
6498
6499         if (!hpsa_CISS_signature_present(h)) {
6500                 err = -ENODEV;
6501                 goto err_out_free_res;
6502         }
6503         hpsa_set_driver_support_bits(h);
6504         hpsa_p600_dma_prefetch_quirk(h);
6505         err = hpsa_enter_simple_mode(h);
6506         if (err)
6507                 goto err_out_free_res;
6508         return 0;
6509
6510 err_out_free_res:
6511         if (h->transtable)
6512                 iounmap(h->transtable);
6513         if (h->cfgtable)
6514                 iounmap(h->cfgtable);
6515         if (h->vaddr)
6516                 iounmap(h->vaddr);
6517         pci_disable_device(h->pdev);
6518         pci_release_regions(h->pdev);
6519         return err;
6520 }
6521
6522 static void hpsa_hba_inquiry(struct ctlr_info *h)
6523 {
6524         int rc;
6525
6526 #define HBA_INQUIRY_BYTE_COUNT 64
6527         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6528         if (!h->hba_inquiry_data)
6529                 return;
6530         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6531                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6532         if (rc != 0) {
6533                 kfree(h->hba_inquiry_data);
6534                 h->hba_inquiry_data = NULL;
6535         }
6536 }
6537
6538 static int hpsa_init_reset_devices(struct pci_dev *pdev)
6539 {
6540         int rc, i;
6541
6542         if (!reset_devices)
6543                 return 0;
6544
6545         /* Reset the controller with a PCI power-cycle or via doorbell */
6546         rc = hpsa_kdump_hard_reset_controller(pdev);
6547
6548         /* -ENOTSUPP here means we cannot reset the controller
6549          * but it's already (and still) up and running in
6550          * "performant mode".  Or, it might be 640x, which can't reset
6551          * due to concerns about shared bbwc between 6402/6404 pair.
6552          */
6553         if (rc == -ENOTSUPP)
6554                 return rc; /* just try to do the kdump anyhow. */
6555         if (rc)
6556                 return -ENODEV;
6557
6558         /* Now try to get the controller to respond to a no-op */
6559         dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
6560         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6561                 if (hpsa_noop(pdev) == 0)
6562                         break;
6563                 else
6564                         dev_warn(&pdev->dev, "no-op failed%s\n",
 6565                                         (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
6566         }
6567         return 0;
6568 }
6569
6570 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6571 {
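        /*
         * Editor's note on sizing: cmd_pool_bits is an allocation bitmap
         * with one bit per command slot, rounded up to whole longs; e.g.
         * with nr_cmds = 1000 on a 64-bit kernel, DIV_ROUND_UP(1000, 64)
         * = 16 longs (128 bytes) are allocated.
         */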
6572         h->cmd_pool_bits = kzalloc(
6573                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6574                 sizeof(unsigned long), GFP_KERNEL);
6575         h->cmd_pool = pci_alloc_consistent(h->pdev,
6576                     h->nr_cmds * sizeof(*h->cmd_pool),
6577                     &(h->cmd_pool_dhandle));
6578         h->errinfo_pool = pci_alloc_consistent(h->pdev,
6579                     h->nr_cmds * sizeof(*h->errinfo_pool),
6580                     &(h->errinfo_pool_dhandle));
6581         if ((h->cmd_pool_bits == NULL)
6582             || (h->cmd_pool == NULL)
6583             || (h->errinfo_pool == NULL)) {
 6584                 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
6585                 return -ENOMEM;
6586         }
6587         return 0;
6588 }
6589
6590 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6591 {
6592         kfree(h->cmd_pool_bits);
6593         if (h->cmd_pool)
6594                 pci_free_consistent(h->pdev,
6595                             h->nr_cmds * sizeof(struct CommandList),
6596                             h->cmd_pool, h->cmd_pool_dhandle);
6597         if (h->ioaccel2_cmd_pool)
6598                 pci_free_consistent(h->pdev,
6599                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6600                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6601         if (h->errinfo_pool)
6602                 pci_free_consistent(h->pdev,
6603                             h->nr_cmds * sizeof(struct ErrorInfo),
6604                             h->errinfo_pool,
6605                             h->errinfo_pool_dhandle);
6606         if (h->ioaccel_cmd_pool)
6607                 pci_free_consistent(h->pdev,
6608                         h->nr_cmds * sizeof(struct io_accel1_cmd),
6609                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6610 }
6611
6612 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6613 {
 6614         int i, cpu;
6615
6616         cpu = cpumask_first(cpu_online_mask);
6617         for (i = 0; i < h->msix_vector; i++) {
 6618                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6619                 cpu = cpumask_next(cpu, cpu_online_mask);
6620         }
6621 }
6622
6623 static int hpsa_request_irq(struct ctlr_info *h,
6624         irqreturn_t (*msixhandler)(int, void *),
6625         irqreturn_t (*intxhandler)(int, void *))
6626 {
6627         int rc, i;
6628
6629         /*
6630          * initialize h->q[x] = x so that interrupt handlers know which
6631          * queue to process.
6632          */
6633         for (i = 0; i < MAX_REPLY_QUEUES; i++)
6634                 h->q[i] = (u8) i;
6635
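        /*
         * Editor's sketch (not driver code): because &h->q[i] is handed to
         * request_irq() below as the dev_id cookie, a per-queue handler can
         * recover its queue number and the owning ctlr_info roughly like:
         *
         *      u8 q = *(u8 *) queue;
         *      struct ctlr_info *h =
         *              container_of(queue - q, struct ctlr_info, q[0]);
         */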
        if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
                /* If performant mode and MSI-X, use multiple reply queues */
                for (i = 0; i < h->msix_vector; i++) {
                        rc = request_irq(h->intr[i], msixhandler,
                                        0, h->devname,
                                        &h->q[i]);
                        if (rc) {
                                int j;

                                dev_err(&h->pdev->dev,
                                        "failed to get irq %d for %s\n",
                                        h->intr[i], h->devname);
                                /* Undo the irqs acquired so far */
                                for (j = 0; j < i; j++)
                                        free_irq(h->intr[j], &h->q[j]);
                                return rc;
                        }
                }
                hpsa_irq_affinity_hints(h);
6643         } else {
6644                 /* Use single reply pool */
6645                 if (h->msix_vector > 0 || h->msi_vector) {
6646                         rc = request_irq(h->intr[h->intr_mode],
6647                                 msixhandler, 0, h->devname,
6648                                 &h->q[h->intr_mode]);
6649                 } else {
6650                         rc = request_irq(h->intr[h->intr_mode],
6651                                 intxhandler, IRQF_SHARED, h->devname,
6652                                 &h->q[h->intr_mode]);
6653                 }
6654         }
6655         if (rc) {
6656                 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6657                        h->intr[h->intr_mode], h->devname);
6658                 return -ENODEV;
6659         }
6660         return 0;
6661 }
6662
6663 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6664 {
6665         if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6666                 HPSA_RESET_TYPE_CONTROLLER)) {
6667                 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6668                 return -EIO;
6669         }
6670
6671         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6672         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6673                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6674                 return -1;
6675         }
6676
6677         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6678         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6679                 dev_warn(&h->pdev->dev, "Board failed to become ready "
6680                         "after soft reset.\n");
6681                 return -1;
6682         }
6683
6684         return 0;
6685 }
6686
6687 static void free_irqs(struct ctlr_info *h)
6688 {
6689         int i;
6690
6691         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6692                 /* Single reply queue, only one irq to free */
6693                 i = h->intr_mode;
6694                 irq_set_affinity_hint(h->intr[i], NULL);
6695                 free_irq(h->intr[i], &h->q[i]);
6696                 return;
6697         }
6698
6699         for (i = 0; i < h->msix_vector; i++) {
6700                 irq_set_affinity_hint(h->intr[i], NULL);
6701                 free_irq(h->intr[i], &h->q[i]);
6702         }
6703 }
6704
6705 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6706 {
6707         free_irqs(h);
6708 #ifdef CONFIG_PCI_MSI
6709         if (h->msix_vector) {
6710                 if (h->pdev->msix_enabled)
6711                         pci_disable_msix(h->pdev);
6712         } else if (h->msi_vector) {
6713                 if (h->pdev->msi_enabled)
6714                         pci_disable_msi(h->pdev);
6715         }
6716 #endif /* CONFIG_PCI_MSI */
6717 }
6718
6719 static void hpsa_free_reply_queues(struct ctlr_info *h)
6720 {
6721         int i;
6722
6723         for (i = 0; i < h->nreply_queues; i++) {
6724                 if (!h->reply_queue[i].head)
6725                         continue;
6726                 pci_free_consistent(h->pdev, h->reply_queue_size,
6727                         h->reply_queue[i].head, h->reply_queue[i].busaddr);
6728                 h->reply_queue[i].head = NULL;
6729                 h->reply_queue[i].busaddr = 0;
6730         }
6731 }
6732
6733 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6734 {
6735         hpsa_free_irqs_and_disable_msix(h);
6736         hpsa_free_sg_chain_blocks(h);
6737         hpsa_free_cmd_pool(h);
6738         kfree(h->ioaccel1_blockFetchTable);
6739         kfree(h->blockFetchTable);
6740         hpsa_free_reply_queues(h);
6741         if (h->vaddr)
6742                 iounmap(h->vaddr);
6743         if (h->transtable)
6744                 iounmap(h->transtable);
6745         if (h->cfgtable)
6746                 iounmap(h->cfgtable);
6747         pci_release_regions(h->pdev);
6748         kfree(h);
6749 }
6750
6751 /* Called when controller lockup detected. */
6752 static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6753 {
6754         struct CommandList *c = NULL;
6755
6756         assert_spin_locked(&h->lock);
6757         /* Mark all outstanding commands as failed and complete them. */
6758         while (!list_empty(list)) {
6759                 c = list_entry(list->next, struct CommandList, list);
6760                 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
6761                 finish_cmd(c);
6762         }
6763 }
6764
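/*
 * lockup_detected is a per-cpu variable so the hot I/O submission path can
 * poll it locklessly without bouncing a shared cache line between CPUs;
 * the writer below pays the cost of updating every online cpu's copy.
 */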
6765 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6766 {
        int cpu;

        for_each_online_cpu(cpu) {
                u32 *lockup_detected;

                lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
                *lockup_detected = value;
        }
6776         wmb(); /* be sure the per-cpu variables are out to memory */
6777 }
6778
6779 static void controller_lockup_detected(struct ctlr_info *h)
6780 {
6781         unsigned long flags;
6782         u32 lockup_detected;
6783
6784         h->access.set_intr_mask(h, HPSA_INTR_OFF);
6785         spin_lock_irqsave(&h->lock, flags);
6786         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6787         if (!lockup_detected) {
6788                 /* no heartbeat, but controller gave us a zero. */
6789                 dev_warn(&h->pdev->dev,
6790                         "lockup detected but scratchpad register is zero\n");
6791                 lockup_detected = 0xffffffff;
6792         }
6793         set_lockup_detected_for_all_cpus(h, lockup_detected);
6794         spin_unlock_irqrestore(&h->lock, flags);
6795         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6796                         lockup_detected);
6797         pci_disable_device(h->pdev);
6798         spin_lock_irqsave(&h->lock, flags);
6799         fail_all_cmds_on_list(h, &h->cmpQ);
6800         fail_all_cmds_on_list(h, &h->reqQ);
6801         spin_unlock_irqrestore(&h->lock, flags);
6802 }
6803
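/*
 * Heartbeat scheme: while healthy, the controller firmware keeps
 * incrementing the HeartBeat counter in the config table.  We sample it
 * at most once per heartbeat_sample_interval; if the counter has not
 * advanced between two samples (and no interrupt arrived in between),
 * the controller is declared locked up.
 */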
6804 static void detect_controller_lockup(struct ctlr_info *h)
6805 {
6806         u64 now;
6807         u32 heartbeat;
6808         unsigned long flags;
6809
6810         now = get_jiffies_64();
6811         /* If we've received an interrupt recently, we're ok. */
6812         if (time_after64(h->last_intr_timestamp +
6813                                 (h->heartbeat_sample_interval), now))
6814                 return;
6815
6816         /*
6817          * If we've already checked the heartbeat recently, we're ok.
6818          * This could happen if someone sends us a signal. We
6819          * otherwise don't care about signals in this thread.
6820          */
6821         if (time_after64(h->last_heartbeat_timestamp +
6822                                 (h->heartbeat_sample_interval), now))
6823                 return;
6824
6825         /* If heartbeat has not changed since we last looked, we're not ok. */
6826         spin_lock_irqsave(&h->lock, flags);
6827         heartbeat = readl(&h->cfgtable->HeartBeat);
6828         spin_unlock_irqrestore(&h->lock, flags);
6829         if (h->last_heartbeat == heartbeat) {
6830                 controller_lockup_detected(h);
6831                 return;
6832         }
6833
6834         /* We're ok. */
6835         h->last_heartbeat = heartbeat;
6836         h->last_heartbeat_timestamp = now;
6837 }
6838
6839 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6840 {
6841         int i;
6842         char *event_type;
6843
6844         /* Clear the driver-requested rescan flag */
6845         h->drv_req_rescan = 0;
6846
6847         /* Ask the controller to clear the events we're handling. */
6848         if ((h->transMethod & (CFGTBL_Trans_io_accel1
6849                         | CFGTBL_Trans_io_accel2)) &&
6850                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6851                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6852
6853                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6854                         event_type = "state change";
6855                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6856                         event_type = "configuration change";
6857                 /* Stop sending new RAID offload reqs via the IO accelerator */
6858                 scsi_block_requests(h->scsi_host);
6859                 for (i = 0; i < h->ndevices; i++)
6860                         h->dev[i]->offload_enabled = 0;
6861                 hpsa_drain_accel_commands(h);
6863                 dev_warn(&h->pdev->dev,
6864                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6865                         h->events, event_type);
6866                 writel(h->events, &(h->cfgtable->clear_event_notify));
6867                 /* Set the "clear event notify field update" bit 6 */
6868                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6869                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6870                 hpsa_wait_for_clear_event_notify_ack(h);
6871                 scsi_unblock_requests(h->scsi_host);
6872         } else {
6873                 /* Acknowledge controller notification events. */
6874                 writel(h->events, &(h->cfgtable->clear_event_notify));
6875                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6876                 hpsa_wait_for_clear_event_notify_ack(h);
6877 #if 0
6878                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6879                 hpsa_wait_for_mode_change_ack(h);
6880 #endif
6881         }
6882         return;
6883 }
6884
6885 /* Check a register on the controller to see if there are configuration
6886  * changes (added/changed/removed logical drives, etc.) which mean that
6887  * we should rescan the controller for devices.
6888  * Also check flag for driver-initiated rescan.
6889  */
6890 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6891 {
6892         if (h->drv_req_rescan)
6893                 return 1;
6894
6895         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6896                 return 0;
6897
6898         h->events = readl(&(h->cfgtable->event_notify));
6899         return h->events & RESCAN_REQUIRED_EVENT_BITS;
6900 }
6901
6902 /*
6903  * Check if any of the offline devices have become ready
6904  */
6905 static int hpsa_offline_devices_ready(struct ctlr_info *h)
6906 {
6907         unsigned long flags;
6908         struct offline_device_entry *d;
6909         struct list_head *this, *tmp;
6910
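        /* Note: the lock is dropped around hpsa_volume_offline() below,
         * since querying the volume sends commands to the controller and
         * may sleep; list_for_each_safe() by itself does not make the walk
         * safe against concurrent list changes while the lock is released.
         */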
6911         spin_lock_irqsave(&h->offline_device_lock, flags);
6912         list_for_each_safe(this, tmp, &h->offline_device_list) {
6913                 d = list_entry(this, struct offline_device_entry,
6914                                 offline_list);
6915                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6916                 if (!hpsa_volume_offline(h, d->scsi3addr))
6917                         return 1;
6918                 spin_lock_irqsave(&h->offline_device_lock, flags);
6919         }
6920         spin_unlock_irqrestore(&h->offline_device_lock, flags);
6921         return 0;
6922 }
6923
6925 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6926 {
6927         unsigned long flags;
6928         struct ctlr_info *h = container_of(to_delayed_work(work),
6929                                         struct ctlr_info, monitor_ctlr_work);
6930         detect_controller_lockup(h);
6931         if (lockup_detected(h))
6932                 return;
6933
6934         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6935                 scsi_host_get(h->scsi_host);
6936                 h->drv_req_rescan = 0;
6937                 hpsa_ack_ctlr_events(h);
6938                 hpsa_scan_start(h->scsi_host);
6939                 scsi_host_put(h->scsi_host);
6940         }
6941
6942         spin_lock_irqsave(&h->lock, flags);
6943         if (h->remove_in_progress) {
6944                 spin_unlock_irqrestore(&h->lock, flags);
6945                 return;
6946         }
6947         schedule_delayed_work(&h->monitor_ctlr_work,
6948                                 h->heartbeat_sample_interval);
6949         spin_unlock_irqrestore(&h->lock, flags);
6950 }
6951
6952 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6953 {
6954         int dac, rc;
6955         struct ctlr_info *h;
6956         int try_soft_reset = 0;
6957         unsigned long flags;
6958
6959         if (number_of_controllers == 0)
6960                 printk(KERN_INFO DRIVER_NAME "\n");
6961
6962         rc = hpsa_init_reset_devices(pdev);
6963         if (rc) {
6964                 if (rc != -ENOTSUPP)
6965                         return rc;
6966                 /* If the reset fails in a particular way (it has no way to do
6967                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
6968                  * a soft reset once we get the controller configured up to the
6969                  * point that it can accept a command.
6970                  */
6971                 try_soft_reset = 1;
6972                 rc = 0;
6973         }
6974
6975 reinit_after_soft_reset:
6976
6977         /* Command structures must be aligned on a 32-byte boundary because
6978          * the 5 lower bits of the address are used by the hardware. and by
 6979          * the 5 lower bits of the address are used by the hardware and by
6980          */
6981         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
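        /* (The low address bits double as per-command information such as
         * the block fetch size; see the block fetch table comments in
         * hpsa_enter_performant_mode() below.)
         */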
6982         h = kzalloc(sizeof(*h), GFP_KERNEL);
6983         if (!h)
6984                 return -ENOMEM;
6985
6986         h->pdev = pdev;
6987         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
6988         INIT_LIST_HEAD(&h->cmpQ);
6989         INIT_LIST_HEAD(&h->reqQ);
6990         INIT_LIST_HEAD(&h->offline_device_list);
6991         spin_lock_init(&h->lock);
6992         spin_lock_init(&h->offline_device_lock);
6993         spin_lock_init(&h->scan_lock);
6994         spin_lock_init(&h->passthru_count_lock);
6995
6996         /* Allocate and clear per-cpu variable lockup_detected */
6997         h->lockup_detected = alloc_percpu(u32);
        if (!h->lockup_detected) {
                rc = -ENOMEM;
                goto clean1;
        }
7000         set_lockup_detected_for_all_cpus(h, 0);
7001
7002         rc = hpsa_pci_init(h);
7003         if (rc != 0)
7004                 goto clean1;
7005
7006         sprintf(h->devname, HPSA "%d", number_of_controllers);
7007         h->ctlr = number_of_controllers;
7008         number_of_controllers++;
7009
 7010         /* Configure the PCI DMA mask; prefer 64-bit, fall back to 32-bit */
7011         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7012         if (rc == 0) {
7013                 dac = 1;
7014         } else {
7015                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7016                 if (rc == 0) {
7017                         dac = 0;
7018                 } else {
7019                         dev_err(&pdev->dev, "no suitable DMA available\n");
7020                         goto clean1;
7021                 }
7022         }
7023
7024         /* make sure the board interrupts are off */
7025         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7026
        rc = hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
        if (rc)
                goto clean2;
7029         dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7030                h->devname, pdev->device,
7031                h->intr[h->intr_mode], dac ? "" : " not");
        rc = hpsa_allocate_cmd_pool(h);
        if (rc)
                goto clean4;
        rc = hpsa_allocate_sg_chain_blocks(h);
        if (rc)
                goto clean4;
7036         init_waitqueue_head(&h->scan_wait_queue);
7037         h->scan_finished = 1; /* no scan currently in progress */
7038
7039         pci_set_drvdata(pdev, h);
7040         h->ndevices = 0;
7041         h->hba_mode_enabled = 0;
7042         h->scsi_host = NULL;
7043         spin_lock_init(&h->devlock);
7044         hpsa_put_ctlr_into_performant_mode(h);
7045
7046         /* At this point, the controller is ready to take commands.
7047          * Now, if reset_devices and the hard reset didn't work, try
7048          * the soft reset and see if that works.
7049          */
7050         if (try_soft_reset) {
7051
7052                 /* This is kind of gross.  We may or may not get a completion
7053                  * from the soft reset command, and if we do, then the value
7054                  * from the fifo may or may not be valid.  So, we wait 10 secs
7055                  * after the reset throwing away any completions we get during
7056                  * that time.  Unregister the interrupt handler and register
7057                  * fake ones to scoop up any residual completions.
7058                  */
7059                 spin_lock_irqsave(&h->lock, flags);
7060                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7061                 spin_unlock_irqrestore(&h->lock, flags);
7062                 free_irqs(h);
7063                 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
7064                                         hpsa_intx_discard_completions);
7065                 if (rc) {
7066                         dev_warn(&h->pdev->dev, "Failed to request_irq after "
7067                                 "soft reset.\n");
7068                         goto clean4;
7069                 }
7070
7071                 rc = hpsa_kdump_soft_reset(h);
7072                 if (rc)
7073                         /* Neither hard nor soft reset worked, we're hosed. */
7074                         goto clean4;
7075
7076                 dev_info(&h->pdev->dev, "Board READY.\n");
7077                 dev_info(&h->pdev->dev,
7078                         "Waiting for stale completions to drain.\n");
7079                 h->access.set_intr_mask(h, HPSA_INTR_ON);
7080                 msleep(10000);
7081                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7082
7083                 rc = controller_reset_failed(h->cfgtable);
7084                 if (rc)
7085                         dev_info(&h->pdev->dev,
7086                                 "Soft reset appears to have failed.\n");
7087
7088                 /* since the controller's reset, we have to go back and re-init
7089                  * everything.  Easiest to just forget what we've done and do it
7090                  * all over again.
7091                  */
7092                 hpsa_undo_allocations_after_kdump_soft_reset(h);
7093                 try_soft_reset = 0;
7094                 if (rc)
7095                         /* don't go to clean4, we already unallocated */
7096                         return -ENODEV;
7097
7098                 goto reinit_after_soft_reset;
7099         }
7100
 7101         /* Enable Accelerated IO path at driver layer */
 7102         h->acciopath_status = 1;
7103
7104         h->drv_req_rescan = 0;
7105
7106         /* Turn the interrupts on so we can service requests */
7107         h->access.set_intr_mask(h, HPSA_INTR_ON);
7108
7109         hpsa_hba_inquiry(h);
7110         hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
7111
7112         /* Monitor the controller for firmware lockups */
7113         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7114         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7115         schedule_delayed_work(&h->monitor_ctlr_work,
7116                                 h->heartbeat_sample_interval);
7117         return 0;
7118
7119 clean4:
7120         hpsa_free_sg_chain_blocks(h);
7121         hpsa_free_cmd_pool(h);
7122         free_irqs(h);
7123 clean2:
7124 clean1:
7125         if (h->lockup_detected)
7126                 free_percpu(h->lockup_detected);
7127         kfree(h);
7128         return rc;
7129 }
7130
7131 static void hpsa_flush_cache(struct ctlr_info *h)
7132 {
7133         char *flush_buf;
7134         struct CommandList *c;
7135
7136         /* Don't bother trying to flush the cache if locked up */
7137         if (unlikely(lockup_detected(h)))
7138                 return;
7139         flush_buf = kzalloc(4, GFP_KERNEL);
7140         if (!flush_buf)
7141                 return;
7142
7143         c = cmd_special_alloc(h);
7144         if (!c) {
7145                 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
7146                 goto out_of_memory;
7147         }
        if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
                RAID_CTLR_LUNID, TYPE_CMD))
                goto out;
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
        if (c->err_info->CommandStatus == 0)
                goto free_cmd;
out:
        dev_warn(&h->pdev->dev,
                "error flushing cache on controller\n");
free_cmd:
        cmd_special_free(h, c);
7158 out_of_memory:
7159         kfree(flush_buf);
7160 }
7161
7162 static void hpsa_shutdown(struct pci_dev *pdev)
7163 {
7164         struct ctlr_info *h;
7165
7166         h = pci_get_drvdata(pdev);
        /* Flush the battery-backed write cache out to the disks, then
         * mask board interrupts and release the irq resources.
         */
7171         hpsa_flush_cache(h);
7172         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7173         hpsa_free_irqs_and_disable_msix(h);
7174 }
7175
7176 static void hpsa_free_device_info(struct ctlr_info *h)
7177 {
7178         int i;
7179
7180         for (i = 0; i < h->ndevices; i++)
7181                 kfree(h->dev[i]);
7182 }
7183
7184 static void hpsa_remove_one(struct pci_dev *pdev)
7185 {
7186         struct ctlr_info *h;
7187         unsigned long flags;
7188
 7189         h = pci_get_drvdata(pdev);
 7190         if (h == NULL) {
 7191                 dev_err(&pdev->dev, "unable to remove device\n");
 7192                 return;
 7193         }
7194
7195         /* Get rid of any controller monitoring work items */
7196         spin_lock_irqsave(&h->lock, flags);
7197         h->remove_in_progress = 1;
7198         cancel_delayed_work(&h->monitor_ctlr_work);
7199         spin_unlock_irqrestore(&h->lock, flags);
7200
7201         hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
7202         hpsa_shutdown(pdev);
7203         iounmap(h->vaddr);
7204         iounmap(h->transtable);
7205         iounmap(h->cfgtable);
7206         hpsa_free_device_info(h);
7207         hpsa_free_sg_chain_blocks(h);
7208         pci_free_consistent(h->pdev,
7209                 h->nr_cmds * sizeof(struct CommandList),
7210                 h->cmd_pool, h->cmd_pool_dhandle);
7211         pci_free_consistent(h->pdev,
7212                 h->nr_cmds * sizeof(struct ErrorInfo),
7213                 h->errinfo_pool, h->errinfo_pool_dhandle);
7214         hpsa_free_reply_queues(h);
7215         kfree(h->cmd_pool_bits);
7216         kfree(h->blockFetchTable);
7217         kfree(h->ioaccel1_blockFetchTable);
7218         kfree(h->ioaccel2_blockFetchTable);
7219         kfree(h->hba_inquiry_data);
7220         pci_disable_device(pdev);
7221         pci_release_regions(pdev);
7222         free_percpu(h->lockup_detected);
7223         kfree(h);
7224 }
7225
7226 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7227         __attribute__((unused)) pm_message_t state)
7228 {
7229         return -ENOSYS;
7230 }
7231
7232 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7233 {
7234         return -ENOSYS;
7235 }
7236
7237 static struct pci_driver hpsa_pci_driver = {
7238         .name = HPSA,
7239         .probe = hpsa_init_one,
7240         .remove = hpsa_remove_one,
 7241         .id_table = hpsa_pci_device_id,
7242         .shutdown = hpsa_shutdown,
7243         .suspend = hpsa_suspend,
7244         .resume = hpsa_resume,
7245 };
7246
7247 /* Fill in bucket_map[], given nsgs (the max number of
7248  * scatter gather elements supported) and bucket[],
7249  * which is an array of 8 integers.  The bucket[] array
7250  * contains 8 different DMA transfer sizes (in 16
7251  * byte increments) which the controller uses to fetch
7252  * commands.  This function fills in bucket_map[], which
7253  * maps a given number of scatter gather elements to one of
7254  * the 8 DMA transfer sizes.  The point of it is to allow the
7255  * controller to only do as much DMA as needed to fetch the
7256  * command, with the DMA transfer size encoded in the lower
7257  * bits of the command address.
7258  */
7259 static void  calc_bucket_map(int bucket[], int num_buckets,
7260         int nsgs, int min_blocks, int *bucket_map)
7261 {
7262         int i, j, b, size;
7263
7264         /* Note, bucket_map must have nsgs+1 entries. */
7265         for (i = 0; i <= nsgs; i++) {
7266                 /* Compute size of a command with i SG entries */
7267                 size = i + min_blocks;
 7268                 b = num_buckets - 1; /* Assume the biggest bucket */
7269                 /* Find the bucket that is just big enough */
7270                 for (j = 0; j < num_buckets; j++) {
7271                         if (bucket[j] >= size) {
7272                                 b = j;
7273                                 break;
7274                         }
7275                 }
7276                 /* for a command with i SG entries, use bucket b. */
7277                 bucket_map[i] = b;
7278         }
7279 }
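/*
 * Worked example (editor's illustration): with the performant-mode bft[]
 * of {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4,
 * a command with 2 SG entries needs 2 + 4 = 6 blocks and so maps to
 * bucket 1 (6 blocks); one with 9 SG entries needs 13 blocks and maps to
 * bucket 5 (20 blocks).
 */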
7280
7281 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7282 {
7283         int i;
7284         unsigned long register_value;
7285         unsigned long transMethod = CFGTBL_Trans_Performant |
7286                         (trans_support & CFGTBL_Trans_use_short_tags) |
7287                                 CFGTBL_Trans_enable_directed_msix |
7288                         (trans_support & (CFGTBL_Trans_io_accel1 |
7289                                 CFGTBL_Trans_io_accel2));
7290         struct access_method access = SA5_performant_access;
7291
7292         /* This is a bit complicated.  There are 8 registers on
 7293          * the controller which we write to, to tell it the 8 different
 7294          * sizes of commands that it may see.  It's a way of
7295          * reducing the DMA done to fetch each command.  Encoded into
7296          * each command's tag are 3 bits which communicate to the controller
7297          * which of the eight sizes that command fits within.  The size of
7298          * each command depends on how many scatter gather entries there are.
7299          * Each SG entry requires 16 bytes.  The eight registers are programmed
7300          * with the number of 16-byte blocks a command of that size requires.
7301          * The smallest command possible requires 5 such 16 byte blocks.
 7302          * The smallest command possible requires 5 such 16-byte blocks.
 7303          * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7304          * within the command block, and does not extend to chained blocks
7305          * of SG elements.   bft[] contains the eight values we write to
7306          * the registers.  They are not evenly distributed, but have more
7307          * sizes for small commands, and fewer sizes for larger commands.
7308          */
7309         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7310 #define MIN_IOACCEL2_BFT_ENTRY 5
7311 #define HPSA_IOACCEL2_HEADER_SZ 4
7312         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7313                         13, 14, 15, 16, 17, 18, 19,
7314                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7315         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7316         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7317         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7318                                  16 * MIN_IOACCEL2_BFT_ENTRY);
7319         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7320         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7321         /*  5 = 1 s/g entry or 4k
7322          *  6 = 2 s/g entry or 8k
7323          *  8 = 4 s/g entry or 16k
7324          * 10 = 6 s/g entry or 24k
7325          */
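        /*
         * Editor's illustration: a command that falls in bucket 3 has the
         * value 3 encoded in those tag bits, telling the controller to
         * fetch bft[3] * 16 = 160 bytes for that command.
         */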
7326
7327         /* If the controller supports either ioaccel method then
7328          * we can also use the RAID stack submit path that does not
7329          * perform the superfluous readl() after each command submission.
7330          */
7331         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7332                 access = SA5_performant_access_no_read;
7333
7334         /* Controller spec: zero out this buffer. */
7335         for (i = 0; i < h->nreply_queues; i++)
7336                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7337
7338         bft[7] = SG_ENTRIES_IN_CMD + 4;
7339         calc_bucket_map(bft, ARRAY_SIZE(bft),
7340                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7341         for (i = 0; i < 8; i++)
7342                 writel(bft[i], &h->transtable->BlockFetch[i]);
7343
7344         /* size of controller ring buffer */
7345         writel(h->max_commands, &h->transtable->RepQSize);
7346         writel(h->nreply_queues, &h->transtable->RepQCount);
7347         writel(0, &h->transtable->RepQCtrAddrLow32);
7348         writel(0, &h->transtable->RepQCtrAddrHigh32);
7349
7350         for (i = 0; i < h->nreply_queues; i++) {
7351                 writel(0, &h->transtable->RepQAddr[i].upper);
7352                 writel(h->reply_queue[i].busaddr,
7353                         &h->transtable->RepQAddr[i].lower);
7354         }
7355
7356         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7357         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7358         /*
 7359          * Enable outbound interrupt coalescing in accelerator mode.
7360          */
7361         if (trans_support & CFGTBL_Trans_io_accel1) {
7362                 access = SA5_ioaccel_mode1_access;
7363                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7364                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7365         } else {
7366                 if (trans_support & CFGTBL_Trans_io_accel2) {
7367                         access = SA5_ioaccel_mode2_access;
7368                         writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7369                         writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7370                 }
7371         }
7372         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7373         hpsa_wait_for_mode_change_ack(h);
7374         register_value = readl(&(h->cfgtable->TransportActive));
7375         if (!(register_value & CFGTBL_Trans_Performant)) {
7376                 dev_warn(&h->pdev->dev, "unable to get board into"
7377                                         " performant mode\n");
7378                 return;
7379         }
7380         /* Change the access methods to the performant access methods */
7381         h->access = access;
7382         h->transMethod = transMethod;
7383
7384         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7385                 (trans_support & CFGTBL_Trans_io_accel2)))
7386                 return;
7387
7388         if (trans_support & CFGTBL_Trans_io_accel1) {
7389                 /* Set up I/O accelerator mode */
7390                 for (i = 0; i < h->nreply_queues; i++) {
7391                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7392                         h->reply_queue[i].current_entry =
7393                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7394                 }
7395                 bft[7] = h->ioaccel_maxsg + 8;
7396                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7397                                 h->ioaccel1_blockFetchTable);
7398
7399                 /* initialize all reply queue entries to unused */
7400                 for (i = 0; i < h->nreply_queues; i++)
7401                         memset(h->reply_queue[i].head,
7402                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7403                                 h->reply_queue_size);
7404
7405                 /* set all the constant fields in the accelerator command
7406                  * frames once at init time to save CPU cycles later.
7407                  */
7408                 for (i = 0; i < h->nr_cmds; i++) {
7409                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7410
7411                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
7412                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
7413                                         (i * sizeof(struct ErrorInfo)));
7414                         cp->err_info_len = sizeof(struct ErrorInfo);
7415                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
7416                         cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7417                         cp->timeout_sec = 0;
7418                         cp->ReplyQueue = 0;
7419                         cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
7420                                                 DIRECT_LOOKUP_BIT;
7421                         cp->Tag.upper = 0;
7422                         cp->host_addr.lower =
7423                                 (u32) (h->ioaccel_cmd_pool_dhandle +
7424                                         (i * sizeof(struct io_accel1_cmd)));
7425                         cp->host_addr.upper = 0;
7426                 }
7427         } else if (trans_support & CFGTBL_Trans_io_accel2) {
7428                 u64 cfg_offset, cfg_base_addr_index;
7429                 u32 bft2_offset, cfg_base_addr;
7430                 int rc;
7431
                rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
                        &cfg_base_addr_index, &cfg_offset);
                /* Should not fail here; the same lookup succeeded in
                 * hpsa_pci_init().  Bail out rather than remap a bogus
                 * offset if it somehow does.
                 */
                if (rc) {
                        dev_warn(&h->pdev->dev,
                                "cannot find cfg addrs, skipping ioaccel2 setup\n");
                        return;
                }
7434                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7435                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7436                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7437                                 4, h->ioaccel2_blockFetchTable);
7438                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7439                 BUILD_BUG_ON(offsetof(struct CfgTable,
7440                                 io_accel_request_size_offset) != 0xb8);
7441                 h->ioaccel2_bft2_regs =
7442                         remap_pci_mem(pci_resource_start(h->pdev,
7443                                         cfg_base_addr_index) +
7444                                         cfg_offset + bft2_offset,
7445                                         ARRAY_SIZE(bft2) *
7446                                         sizeof(*h->ioaccel2_bft2_regs));
7447                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7448                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7449         }
7450         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7451         hpsa_wait_for_mode_change_ack(h);
7452 }
7453
7454 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7455 {
7456         h->ioaccel_maxsg =
7457                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7458         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7459                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7460
7461         /* Command structures must be aligned on a 128-byte boundary
7462          * because the 7 lower bits of the address are used by the
7463          * hardware.
7464          */
7465         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7466                         IOACCEL1_COMMANDLIST_ALIGNMENT);
7467         h->ioaccel_cmd_pool =
7468                 pci_alloc_consistent(h->pdev,
7469                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7470                         &(h->ioaccel_cmd_pool_dhandle));
7471
7472         h->ioaccel1_blockFetchTable =
7473                 kmalloc(((h->ioaccel_maxsg + 1) *
7474                                 sizeof(u32)), GFP_KERNEL);
7475
7476         if ((h->ioaccel_cmd_pool == NULL) ||
7477                 (h->ioaccel1_blockFetchTable == NULL))
7478                 goto clean_up;
7479
7480         memset(h->ioaccel_cmd_pool, 0,
7481                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7482         return 0;
7483
7484 clean_up:
7485         if (h->ioaccel_cmd_pool)
7486                 pci_free_consistent(h->pdev,
7487                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7488                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7489         kfree(h->ioaccel1_blockFetchTable);
 7490         return -ENOMEM;
7491 }
7492
7493 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7494 {
7495         /* Allocate ioaccel2 mode command blocks and block fetch table */
7496
7497         h->ioaccel_maxsg =
7498                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7499         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7500                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7501
7502         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7503                         IOACCEL2_COMMANDLIST_ALIGNMENT);
7504         h->ioaccel2_cmd_pool =
7505                 pci_alloc_consistent(h->pdev,
7506                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7507                         &(h->ioaccel2_cmd_pool_dhandle));
7508
7509         h->ioaccel2_blockFetchTable =
7510                 kmalloc(((h->ioaccel_maxsg + 1) *
7511                                 sizeof(u32)), GFP_KERNEL);
7512
7513         if ((h->ioaccel2_cmd_pool == NULL) ||
7514                 (h->ioaccel2_blockFetchTable == NULL))
7515                 goto clean_up;
7516
7517         memset(h->ioaccel2_cmd_pool, 0,
7518                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7519         return 0;
7520
7521 clean_up:
7522         if (h->ioaccel2_cmd_pool)
7523                 pci_free_consistent(h->pdev,
7524                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7525                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7526         kfree(h->ioaccel2_blockFetchTable);
 7527         return -ENOMEM;
7528 }
7529
7530 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7531 {
7532         u32 trans_support;
7533         unsigned long transMethod = CFGTBL_Trans_Performant |
7534                                         CFGTBL_Trans_use_short_tags;
7535         int i;
7536
7537         if (hpsa_simple_mode)
7538                 return;
7539
7540         trans_support = readl(&(h->cfgtable->TransportSupport));
7541         if (!(trans_support & PERFORMANT_MODE))
7542                 return;
7543
7544         /* Check for I/O accelerator mode support */
7545         if (trans_support & CFGTBL_Trans_io_accel1) {
7546                 transMethod |= CFGTBL_Trans_io_accel1 |
7547                                 CFGTBL_Trans_enable_directed_msix;
7548                 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7549                         goto clean_up;
        } else if (trans_support & CFGTBL_Trans_io_accel2) {
                transMethod |= CFGTBL_Trans_io_accel2 |
                                CFGTBL_Trans_enable_directed_msix;
                if (ioaccel2_alloc_cmds_and_bft(h))
                        goto clean_up;
        }
7558
7559         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7560         hpsa_get_max_perf_mode_cmds(h);
7561         /* Performant mode ring buffer and supporting data structures */
7562         h->reply_queue_size = h->max_commands * sizeof(u64);
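        /* e.g. with max_commands = 1024 each reply ring is 8 KB */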
7563
7564         for (i = 0; i < h->nreply_queues; i++) {
7565                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7566                                                 h->reply_queue_size,
7567                                                 &(h->reply_queue[i].busaddr));
7568                 if (!h->reply_queue[i].head)
7569                         goto clean_up;
7570                 h->reply_queue[i].size = h->max_commands;
7571                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
7572                 h->reply_queue[i].current_entry = 0;
7573         }
7574
7575         /* Need a block fetch table for performant mode */
7576         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7577                                 sizeof(u32)), GFP_KERNEL);
7578         if (!h->blockFetchTable)
7579                 goto clean_up;
7580
7581         hpsa_enter_performant_mode(h, trans_support);
7582         return;
7583
7584 clean_up:
7585         hpsa_free_reply_queues(h);
7586         kfree(h->blockFetchTable);
7587 }
7588
7589 static int is_accelerated_cmd(struct CommandList *c)
7590 {
7591         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7592 }
7593
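/*
 * Poll until no accelerated commands remain on either queue.  Callers are
 * expected to have stopped new ioaccel submissions first (as
 * hpsa_ack_ctlr_events() does via scsi_block_requests() and by clearing
 * offload_enabled), otherwise this loop may never terminate.
 */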
7594 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7595 {
7596         struct CommandList *c = NULL;
7597         unsigned long flags;
7598         int accel_cmds_out;
7599
7600         do { /* wait for all outstanding commands to drain out */
7601                 accel_cmds_out = 0;
7602                 spin_lock_irqsave(&h->lock, flags);
7603                 list_for_each_entry(c, &h->cmpQ, list)
7604                         accel_cmds_out += is_accelerated_cmd(c);
7605                 list_for_each_entry(c, &h->reqQ, list)
7606                         accel_cmds_out += is_accelerated_cmd(c);
7607                 spin_unlock_irqrestore(&h->lock, flags);
7608                 if (accel_cmds_out <= 0)
7609                         break;
7610                 msleep(100);
7611         } while (1);
7612 }
7613
7614 /*
 7615  *  This is it.  Register the PCI driver information for the cards we control;
 7616  *  the OS will call our registered routines when it finds one of our cards.
7617  */
7618 static int __init hpsa_init(void)
7619 {
7620         return pci_register_driver(&hpsa_pci_driver);
7621 }
7622
7623 static void __exit hpsa_cleanup(void)
7624 {
7625         pci_unregister_driver(&hpsa_pci_driver);
7626 }
7627
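/*
 * Compile-time layout checks: each BUILD_BUG_ON() below breaks the build
 * if a structure member's offset drifts from the layout the controller
 * firmware expects, catching ABI breakage at compile time instead of as
 * data corruption at run time.  The function is never called; it exists
 * only so the checks are compiled.
 */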
7628 static void __attribute__((unused)) verify_offsets(void)
7629 {
7630 #define VERIFY_OFFSET(member, offset) \
7631         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7632
7633         VERIFY_OFFSET(structure_size, 0);
7634         VERIFY_OFFSET(volume_blk_size, 4);
7635         VERIFY_OFFSET(volume_blk_cnt, 8);
7636         VERIFY_OFFSET(phys_blk_shift, 16);
7637         VERIFY_OFFSET(parity_rotation_shift, 17);
7638         VERIFY_OFFSET(strip_size, 18);
7639         VERIFY_OFFSET(disk_starting_blk, 20);
7640         VERIFY_OFFSET(disk_blk_cnt, 28);
7641         VERIFY_OFFSET(data_disks_per_row, 36);
7642         VERIFY_OFFSET(metadata_disks_per_row, 38);
7643         VERIFY_OFFSET(row_cnt, 40);
7644         VERIFY_OFFSET(layout_map_count, 42);
7645         VERIFY_OFFSET(flags, 44);
7646         VERIFY_OFFSET(dekindex, 46);
 7647         /* VERIFY_OFFSET(reserved, 48) */
7648         VERIFY_OFFSET(data, 64);
7649
7650 #undef VERIFY_OFFSET
7651
7652 #define VERIFY_OFFSET(member, offset) \
7653         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7654
7655         VERIFY_OFFSET(IU_type, 0);
7656         VERIFY_OFFSET(direction, 1);
7657         VERIFY_OFFSET(reply_queue, 2);
7658         /* VERIFY_OFFSET(reserved1, 3);  */
7659         VERIFY_OFFSET(scsi_nexus, 4);
7660         VERIFY_OFFSET(Tag, 8);
7661         VERIFY_OFFSET(cdb, 16);
7662         VERIFY_OFFSET(cciss_lun, 32);
7663         VERIFY_OFFSET(data_len, 40);
7664         VERIFY_OFFSET(cmd_priority_task_attr, 44);
7665         VERIFY_OFFSET(sg_count, 45);
7666         /* VERIFY_OFFSET(reserved3 */
7667         VERIFY_OFFSET(err_ptr, 48);
7668         VERIFY_OFFSET(err_len, 56);
7669         /* VERIFY_OFFSET(reserved4  */
7670         VERIFY_OFFSET(sg, 64);
7671
7672 #undef VERIFY_OFFSET
7673
7674 #define VERIFY_OFFSET(member, offset) \
7675         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7676
7677         VERIFY_OFFSET(dev_handle, 0x00);
7678         VERIFY_OFFSET(reserved1, 0x02);
7679         VERIFY_OFFSET(function, 0x03);
7680         VERIFY_OFFSET(reserved2, 0x04);
7681         VERIFY_OFFSET(err_info, 0x0C);
7682         VERIFY_OFFSET(reserved3, 0x10);
7683         VERIFY_OFFSET(err_info_len, 0x12);
7684         VERIFY_OFFSET(reserved4, 0x13);
7685         VERIFY_OFFSET(sgl_offset, 0x14);
7686         VERIFY_OFFSET(reserved5, 0x15);
7687         VERIFY_OFFSET(transfer_len, 0x1C);
7688         VERIFY_OFFSET(reserved6, 0x20);
7689         VERIFY_OFFSET(io_flags, 0x24);
7690         VERIFY_OFFSET(reserved7, 0x26);
7691         VERIFY_OFFSET(LUN, 0x34);
7692         VERIFY_OFFSET(control, 0x3C);
7693         VERIFY_OFFSET(CDB, 0x40);
7694         VERIFY_OFFSET(reserved8, 0x50);
7695         VERIFY_OFFSET(host_context_flags, 0x60);
7696         VERIFY_OFFSET(timeout_sec, 0x62);
7697         VERIFY_OFFSET(ReplyQueue, 0x64);
7698         VERIFY_OFFSET(reserved9, 0x65);
7699         VERIFY_OFFSET(Tag, 0x68);
7700         VERIFY_OFFSET(host_addr, 0x70);
7701         VERIFY_OFFSET(CISS_LUN, 0x78);
7702         VERIFY_OFFSET(SG, 0x78 + 8);
7703 #undef VERIFY_OFFSET
7704 }
7705
7706 module_init(hpsa_init);
7707 module_exit(hpsa_cleanup);