1 /*
2  * This is the Fusion MPT base driver providing common API layer interface
3  * for access to MPT (Message Passing Technology) firmware.
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6  * Copyright (C) 2012  LSI Corporation
7  *  (mailto:DL-MPTFusionLinux@lsi.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44
45 #include <linux/kernel.h>
46 #include <linux/module.h>
47 #include <linux/errno.h>
48 #include <linux/init.h>
49 #include <linux/slab.h>
50 #include <linux/types.h>
51 #include <linux/pci.h>
52 #include <linux/kdev_t.h>
53 #include <linux/blkdev.h>
54 #include <linux/delay.h>
55 #include <linux/interrupt.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/io.h>
58 #include <linux/time.h>
59 #include <linux/kthread.h>
60 #include <linux/aer.h>
61
62
63 #include "mpt3sas_base.h"
64
65 static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];
66
67
68 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
69
70  /* maximum controller queue depth */
71 #define MAX_HBA_QUEUE_DEPTH     30000
72 #define MAX_CHAIN_DEPTH         100000
73 static int max_queue_depth = -1;
74 module_param(max_queue_depth, int, 0);
75 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
76
77 static int max_sgl_entries = -1;
78 module_param(max_sgl_entries, int, 0);
79 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
80
81 static int msix_disable = -1;
82 module_param(msix_disable, int, 0);
83 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
84
85
86 static int mpt3sas_fwfault_debug;
87 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
88         " enable detection of firmware fault and halt firmware - (default=0)");
89
90
91 /**
92  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
93  *
94  */
95 static int
96 _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
97 {
98         int ret = param_set_int(val, kp);
99         struct MPT3SAS_ADAPTER *ioc;
100
101         if (ret)
102                 return ret;
103
104         pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
105         list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
106                 ioc->fwfault_debug = mpt3sas_fwfault_debug;
107         return 0;
108 }
109 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
110         param_get_int, &mpt3sas_fwfault_debug, 0644);
111
112 /**
113  *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
114  * @arg: input argument, used to derive ioc
115  *
116  * Return 0 if controller is removed from pci subsystem.
117  * Return -1 otherwise.
118  */
119 static int mpt3sas_remove_dead_ioc_func(void *arg)
120 {
121         struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
122         struct pci_dev *pdev;
123
124         if (!ioc)
125                 return -1;
126
127         pdev = ioc->pdev;
128         if (!pdev)
129                 return -1;
130         pci_stop_and_remove_bus_device(pdev);
131         return 0;
132 }
133
134 /**
135  * _base_fault_reset_work - workq handling ioc fault conditions
136  * @work: input argument, used to derive ioc
137  * Context: sleep.
138  *
139  * Return nothing.
140  */
141 static void
142 _base_fault_reset_work(struct work_struct *work)
143 {
144         struct MPT3SAS_ADAPTER *ioc =
145             container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
146         unsigned long    flags;
147         u32 doorbell;
148         int rc;
149         struct task_struct *p;
150
151
152         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
153         if (ioc->shost_recovery)
154                 goto rearm_timer;
155         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
156
157         doorbell = mpt3sas_base_get_iocstate(ioc, 0);
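        /*
         * A doorbell read of all ones (IOC state bits equal to the mask)
         * means the register read itself failed, i.e. the controller has
         * dropped off the PCI bus and must be treated as dead.
         */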
158         if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
159                 pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
160                     ioc->name);
161
162                 /*
163                  * Call _scsih_flush_pending_cmds callback so that we flush all
164                  * pending commands back to OS. This call is required to avoid
165                  * deadlock at block layer. Dead IOC will fail to do diag reset,
166                  * and this call is safe since dead ioc will never return any
167                  * command back from HW.
168                  */
169                 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
170                 /*
171                  * Set remove_host flag early since kernel thread will
172                  * take some time to execute.
173                  */
174                 ioc->remove_host = 1;
175                 /*Remove the Dead Host */
176                 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
177                     "mpt3sas_dead_ioc_%d", ioc->id);
178                 if (IS_ERR(p))
179                         pr_err(MPT3SAS_FMT
180                         "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
181                         ioc->name, __func__);
182                 else
183                         pr_err(MPT3SAS_FMT
184                         "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
185                         ioc->name, __func__);
186                 return; /* don't rearm timer */
187         }
188
189         if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
190                 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
191                     FORCE_BIG_HAMMER);
192                 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
193                     __func__, (rc == 0) ? "success" : "failed");
194                 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
195                 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
196                         mpt3sas_base_fault_info(ioc, doorbell &
197                             MPI2_DOORBELL_DATA_MASK);
198                 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
199                     MPI2_IOC_STATE_OPERATIONAL)
200                         return; /* don't rearm timer */
201         }
202
203         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
204  rearm_timer:
205         if (ioc->fault_reset_work_q)
206                 queue_delayed_work(ioc->fault_reset_work_q,
207                     &ioc->fault_reset_work,
208                     msecs_to_jiffies(FAULT_POLLING_INTERVAL));
209         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
210 }
211
212 /**
213  * mpt3sas_base_start_watchdog - start the fault_reset_work_q
214  * @ioc: per adapter object
215  * Context: sleep.
216  *
217  * Return nothing.
218  */
219 void
220 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
221 {
222         unsigned long    flags;
223
224         if (ioc->fault_reset_work_q)
225                 return;
226
227         /* initialize fault polling */
228
229         INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
230         snprintf(ioc->fault_reset_work_q_name,
231             sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
232         ioc->fault_reset_work_q =
233                 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
234         if (!ioc->fault_reset_work_q) {
235                 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
236                     ioc->name, __func__, __LINE__);
237                         return;
238         }
239         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
240         if (ioc->fault_reset_work_q)
241                 queue_delayed_work(ioc->fault_reset_work_q,
242                     &ioc->fault_reset_work,
243                     msecs_to_jiffies(FAULT_POLLING_INTERVAL));
244         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
245 }
246
247 /**
248  * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
249  * @ioc: per adapter object
250  * Context: sleep.
251  *
252  * Return nothing.
253  */
254 void
255 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
256 {
257         unsigned long flags;
258         struct workqueue_struct *wq;
259
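        /*
         * Clear fault_reset_work_q under the lock first so the work
         * handler's rearm path sees NULL and cannot requeue itself while
         * the workqueue is being cancelled and destroyed.
         */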
260         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
261         wq = ioc->fault_reset_work_q;
262         ioc->fault_reset_work_q = NULL;
263         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
264         if (wq) {
265                 if (!cancel_delayed_work(&ioc->fault_reset_work))
266                         flush_workqueue(wq);
267                 destroy_workqueue(wq);
268         }
269 }
270
271 /**
272  * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
273  * @ioc: per adapter object
274  * @fault_code: fault code
275  *
276  * Return nothing.
277  */
278 void
279 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
280 {
281         pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
282             ioc->name, fault_code);
283 }
284
285 /**
286  * mpt3sas_halt_firmware - halts the mpt controller firmware
287  * @ioc: per adapter object
288  *
289  * For debugging timeout related issues.  Writing 0xC0FFEE00
290  * to the doorbell register will halt the controller firmware. With
291  * both the driver and the firmware stopped, the end user can
292  * obtain a ring buffer from the controller UART.
293  */
294 void
295 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
296 {
297         u32 doorbell;
298
299         if (!ioc->fwfault_debug)
300                 return;
301
302         dump_stack();
303
304         doorbell = readl(&ioc->chip->Doorbell);
305         if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
306                 mpt3sas_base_fault_info(ioc , doorbell);
307         else {
308                 writel(0xC0FFEE00, &ioc->chip->Doorbell);
309                 pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
310                         ioc->name);
311         }
312
313         if (ioc->fwfault_debug == 2)
314                 for (;;)
315                         ;
316         else
317                 panic("panic in %s\n", __func__);
318 }
319
320 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING
321 /**
322  * _base_sas_ioc_info - verbose translation of the ioc status
323  * @ioc: per adapter object
324  * @mpi_reply: reply mf payload returned from firmware
325  * @request_hdr: request mf
326  *
327  * Return nothing.
328  */
329 static void
330 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
331         MPI2RequestHeader_t *request_hdr)
332 {
333         u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
334             MPI2_IOCSTATUS_MASK;
335         char *desc = NULL;
336         u16 frame_sz;
337         char *func_str = NULL;
338
339         /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
340         if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
341             request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
342             request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
343                 return;
344
345         if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
346                 return;
347
348         switch (ioc_status) {
349
350 /****************************************************************************
351 *  Common IOCStatus values for all replies
352 ****************************************************************************/
353
354         case MPI2_IOCSTATUS_INVALID_FUNCTION:
355                 desc = "invalid function";
356                 break;
357         case MPI2_IOCSTATUS_BUSY:
358                 desc = "busy";
359                 break;
360         case MPI2_IOCSTATUS_INVALID_SGL:
361                 desc = "invalid sgl";
362                 break;
363         case MPI2_IOCSTATUS_INTERNAL_ERROR:
364                 desc = "internal error";
365                 break;
366         case MPI2_IOCSTATUS_INVALID_VPID:
367                 desc = "invalid vpid";
368                 break;
369         case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
370                 desc = "insufficient resources";
371                 break;
372         case MPI2_IOCSTATUS_INVALID_FIELD:
373                 desc = "invalid field";
374                 break;
375         case MPI2_IOCSTATUS_INVALID_STATE:
376                 desc = "invalid state";
377                 break;
378         case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
379                 desc = "op state not supported";
380                 break;
381
382 /****************************************************************************
383 *  Config IOCStatus values
384 ****************************************************************************/
385
386         case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
387                 desc = "config invalid action";
388                 break;
389         case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
390                 desc = "config invalid type";
391                 break;
392         case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
393                 desc = "config invalid page";
394                 break;
395         case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
396                 desc = "config invalid data";
397                 break;
398         case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
399                 desc = "config no defaults";
400                 break;
401         case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
402                 desc = "config cant commit";
403                 break;
404
405 /****************************************************************************
406 *  SCSI IO Reply
407 ****************************************************************************/
408
409         case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
410         case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
411         case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
412         case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
413         case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
414         case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
415         case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
416         case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
417         case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
418         case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
419         case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
420         case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
421                 break;
422
423 /****************************************************************************
424 *  For use by SCSI Initiator and SCSI Target end-to-end data protection
425 ****************************************************************************/
426
427         case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
428                 desc = "eedp guard error";
429                 break;
430         case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
431                 desc = "eedp ref tag error";
432                 break;
433         case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
434                 desc = "eedp app tag error";
435                 break;
436
437 /****************************************************************************
438 *  SCSI Target values
439 ****************************************************************************/
440
441         case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
442                 desc = "target invalid io index";
443                 break;
444         case MPI2_IOCSTATUS_TARGET_ABORTED:
445                 desc = "target aborted";
446                 break;
447         case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
448                 desc = "target no conn retryable";
449                 break;
450         case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
451                 desc = "target no connection";
452                 break;
453         case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
454                 desc = "target xfer count mismatch";
455                 break;
456         case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
457                 desc = "target data offset error";
458                 break;
459         case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
460                 desc = "target too much write data";
461                 break;
462         case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
463                 desc = "target iu too short";
464                 break;
465         case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
466                 desc = "target ack nak timeout";
467                 break;
468         case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
469                 desc = "target nak received";
470                 break;
471
472 /****************************************************************************
473 *  Serial Attached SCSI values
474 ****************************************************************************/
475
476         case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
477                 desc = "smp request failed";
478                 break;
479         case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
480                 desc = "smp data overrun";
481                 break;
482
483 /****************************************************************************
484 *  Diagnostic Buffer Post / Diagnostic Release values
485 ****************************************************************************/
486
487         case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
488                 desc = "diagnostic released";
489                 break;
490         default:
491                 break;
492         }
493
494         if (!desc)
495                 return;
496
497         switch (request_hdr->Function) {
498         case MPI2_FUNCTION_CONFIG:
499                 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
500                 func_str = "config_page";
501                 break;
502         case MPI2_FUNCTION_SCSI_TASK_MGMT:
503                 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
504                 func_str = "task_mgmt";
505                 break;
506         case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
507                 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
508                 func_str = "sas_iounit_ctl";
509                 break;
510         case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
511                 frame_sz = sizeof(Mpi2SepRequest_t);
512                 func_str = "enclosure";
513                 break;
514         case MPI2_FUNCTION_IOC_INIT:
515                 frame_sz = sizeof(Mpi2IOCInitRequest_t);
516                 func_str = "ioc_init";
517                 break;
518         case MPI2_FUNCTION_PORT_ENABLE:
519                 frame_sz = sizeof(Mpi2PortEnableRequest_t);
520                 func_str = "port_enable";
521                 break;
522         case MPI2_FUNCTION_SMP_PASSTHROUGH:
523                 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
524                 func_str = "smp_passthru";
525                 break;
526         default:
527                 frame_sz = 32;
528                 func_str = "unknown";
529                 break;
530         }
531
532         pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
533                 ioc->name, desc, ioc_status, request_hdr, func_str);
534
535         _debug_dump_mf(request_hdr, frame_sz/4);
536 }
537
538 /**
539  * _base_display_event_data - verbose translation of firmware async events
540  * @ioc: per adapter object
541  * @mpi_reply: reply mf payload returned from firmware
542  *
543  * Return nothing.
544  */
545 static void
546 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
547         Mpi2EventNotificationReply_t *mpi_reply)
548 {
549         char *desc = NULL;
550         u16 event;
551
552         if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
553                 return;
554
555         event = le16_to_cpu(mpi_reply->Event);
556
557         switch (event) {
558         case MPI2_EVENT_LOG_DATA:
559                 desc = "Log Data";
560                 break;
561         case MPI2_EVENT_STATE_CHANGE:
562                 desc = "Status Change";
563                 break;
564         case MPI2_EVENT_HARD_RESET_RECEIVED:
565                 desc = "Hard Reset Received";
566                 break;
567         case MPI2_EVENT_EVENT_CHANGE:
568                 desc = "Event Change";
569                 break;
570         case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
571                 desc = "Device Status Change";
572                 break;
573         case MPI2_EVENT_IR_OPERATION_STATUS:
574                 desc = "IR Operation Status";
575                 break;
576         case MPI2_EVENT_SAS_DISCOVERY:
577         {
578                 Mpi2EventDataSasDiscovery_t *event_data =
579                     (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
580                 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
581                     (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
582                     "start" : "stop");
583                 if (event_data->DiscoveryStatus)
584                         pr_info("discovery_status(0x%08x)",
585                             le32_to_cpu(event_data->DiscoveryStatus));
586                 pr_info("\n");
587                 return;
588         }
589         case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
590                 desc = "SAS Broadcast Primitive";
591                 break;
592         case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
593                 desc = "SAS Init Device Status Change";
594                 break;
595         case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
596                 desc = "SAS Init Table Overflow";
597                 break;
598         case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
599                 desc = "SAS Topology Change List";
600                 break;
601         case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
602                 desc = "SAS Enclosure Device Status Change";
603                 break;
604         case MPI2_EVENT_IR_VOLUME:
605                 desc = "IR Volume";
606                 break;
607         case MPI2_EVENT_IR_PHYSICAL_DISK:
608                 desc = "IR Physical Disk";
609                 break;
610         case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
611                 desc = "IR Configuration Change List";
612                 break;
613         case MPI2_EVENT_LOG_ENTRY_ADDED:
614                 desc = "Log Entry Added";
615                 break;
616         }
617
618         if (!desc)
619                 return;
620
621         pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
622 }
623 #endif
624
625 /**
626  * _base_sas_log_info - verbose translation of firmware log info
627  * @ioc: per adapter object
628  * @log_info: log info
629  *
630  * Return nothing.
631  */
632 static void
633 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
634 {
635         union loginfo_type {
636                 u32     loginfo;
637                 struct {
638                         u32     subcode:16;
639                         u32     code:8;
640                         u32     originator:4;
641                         u32     bus_type:4;
642                 } dw;
643         };
644         union loginfo_type sas_loginfo;
645         char *originator_str = NULL;
646
647         sas_loginfo.loginfo = log_info;
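        /* bus_type sits in the top nibble of the loginfo word; 3 == SAS */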
648         if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
649                 return;
650
651         /* each nexus loss loginfo */
652         if (log_info == 0x31170000)
653                 return;
654
655         /* eat the loginfos associated with task aborts */
656         if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
657             0x31140000 || log_info == 0x31130000))
658                 return;
659
660         switch (sas_loginfo.dw.originator) {
661         case 0:
662                 originator_str = "IOP";
663                 break;
664         case 1:
665                 originator_str = "PL";
666                 break;
667         case 2:
668                 originator_str = "IR";
669                 break;
670         }
671
672         pr_warn(MPT3SAS_FMT
673                 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
674                 ioc->name, log_info,
675              originator_str, sas_loginfo.dw.code,
676              sas_loginfo.dw.subcode);
677 }
678
679 /**
680  * _base_display_reply_info - display the ioc status and log info for a reply
681  * @ioc: per adapter object
682  * @smid: system request message index
683  * @msix_index: MSIX table index supplied by the OS
684  * @reply: reply message frame(lower 32bit addr)
685  *
686  * Return nothing.
687  */
688 static void
689 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
690         u32 reply)
691 {
692         MPI2DefaultReply_t *mpi_reply;
693         u16 ioc_status;
694         u32 loginfo = 0;
695
696         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
697         if (unlikely(!mpi_reply)) {
698                 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
699                     ioc->name, __FILE__, __LINE__, __func__);
700                 return;
701         }
702         ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
703 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING
704         if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
705             (ioc->logging_level & MPT_DEBUG_REPLY)) {
706                 _base_sas_ioc_info(ioc , mpi_reply,
707                    mpt3sas_base_get_msg_frame(ioc, smid));
708         }
709 #endif
710         if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
711                 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
712                 _base_sas_log_info(ioc, loginfo);
713         }
714
715         if (ioc_status || loginfo) {
716                 ioc_status &= MPI2_IOCSTATUS_MASK;
717                 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
718         }
719 }
720
721 /**
722  * mpt3sas_base_done - base internal command completion routine
723  * @ioc: per adapter object
724  * @smid: system request message index
725  * @msix_index: MSIX table index supplied by the OS
726  * @reply: reply message frame(lower 32bit addr)
727  *
728  * Return 1 meaning mf should be freed from _base_interrupt
729  *        0 means the mf is freed from this function.
730  */
731 u8
732 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
733         u32 reply)
734 {
735         MPI2DefaultReply_t *mpi_reply;
736
737         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
738         if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
739                 return 1;
740
741         if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
742                 return 1;
743
744         ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
745         if (mpi_reply) {
746                 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
747                 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
748         }
749         ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
750
751         complete(&ioc->base_cmds.done);
752         return 1;
753 }
754
755 /**
756  * _base_async_event - main callback handler for firmware async events
757  * @ioc: per adapter object
758  * @msix_index: MSIX table index supplied by the OS
759  * @reply: reply message frame(lower 32bit addr)
760  *
761  * Return 1 meaning mf should be freed from _base_interrupt
762  *        0 means the mf is freed from this function.
763  */
764 static u8
765 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
766 {
767         Mpi2EventNotificationReply_t *mpi_reply;
768         Mpi2EventAckRequest_t *ack_request;
769         u16 smid;
770
771         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
772         if (!mpi_reply)
773                 return 1;
774         if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
775                 return 1;
776 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING
777         _base_display_event_data(ioc, mpi_reply);
778 #endif
779         if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
780                 goto out;
781         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
782         if (!smid) {
783                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
784                     ioc->name, __func__);
785                 goto out;
786         }
787
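        /*
         * The firmware asked for an explicit acknowledgement of this event,
         * so build an EVENT_ACK request that echoes back the Event and
         * EventContext from the notification reply.
         */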
788         ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
789         memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
790         ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
791         ack_request->Event = mpi_reply->Event;
792         ack_request->EventContext = mpi_reply->EventContext;
793         ack_request->VF_ID = 0;  /* TODO */
794         ack_request->VP_ID = 0;
795         mpt3sas_base_put_smid_default(ioc, smid);
796
797  out:
798
799         /* scsih callback handler */
800         mpt3sas_scsih_event_callback(ioc, msix_index, reply);
801
802         /* ctl callback handler */
803         mpt3sas_ctl_event_callback(ioc, msix_index, reply);
804
805         return 1;
806 }
807
808 /**
809  * _base_get_cb_idx - obtain the callback index
810  * @ioc: per adapter object
811  * @smid: system request message index
812  *
813  * Return callback index.
814  */
815 static u8
816 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
817 {
818         int i;
819         u8 cb_idx;
820
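        /*
         * The smid space is split into three contiguous ranges: regular
         * I/O (scsi_lookup), high priority (hpr_lookup) and internal
         * (internal_lookup); the owning tracker is found by offsetting
         * the smid against the start of its range.
         */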
821         if (smid < ioc->hi_priority_smid) {
822                 i = smid - 1;
823                 cb_idx = ioc->scsi_lookup[i].cb_idx;
824         } else if (smid < ioc->internal_smid) {
825                 i = smid - ioc->hi_priority_smid;
826                 cb_idx = ioc->hpr_lookup[i].cb_idx;
827         } else if (smid <= ioc->hba_queue_depth) {
828                 i = smid - ioc->internal_smid;
829                 cb_idx = ioc->internal_lookup[i].cb_idx;
830         } else
831                 cb_idx = 0xFF;
832         return cb_idx;
833 }
834
835 /**
836  * _base_mask_interrupts - disable interrupts
837  * @ioc: per adapter object
838  *
839  * Disabling ResetIRQ, Reply and Doorbell Interrupts
840  *
841  * Return nothing.
842  */
843 static void
844 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
845 {
846         u32 him_register;
847
848         ioc->mask_interrupts = 1;
849         him_register = readl(&ioc->chip->HostInterruptMask);
850         him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
851         writel(him_register, &ioc->chip->HostInterruptMask);
852         readl(&ioc->chip->HostInterruptMask);
853 }
854
855 /**
856  * _base_unmask_interrupts - enable interrupts
857  * @ioc: per adapter object
858  *
859  * Enabling only Reply Interrupts
860  *
861  * Return nothing.
862  */
863 static void
864 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
865 {
866         u32 him_register;
867
868         him_register = readl(&ioc->chip->HostInterruptMask);
869         him_register &= ~MPI2_HIM_RIM;
870         writel(him_register, &ioc->chip->HostInterruptMask);
871         ioc->mask_interrupts = 0;
872 }
873
874 union reply_descriptor {
875         u64 word;
876         struct {
877                 u32 low;
878                 u32 high;
879         } u;
880 };
881
882 /**
883  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
884  * @irq: irq number (not used)
885  * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue structure
886  * @r: pt_regs pointer (not used)
887  *
888  * Return IRQ_HANDLED if processed, else IRQ_NONE.
889  */
890 static irqreturn_t
891 _base_interrupt(int irq, void *bus_id)
892 {
893         struct adapter_reply_queue *reply_q = bus_id;
894         union reply_descriptor rd;
895         u32 completed_cmds;
896         u8 request_desript_type;
897         u16 smid;
898         u8 cb_idx;
899         u32 reply;
900         u8 msix_index = reply_q->msix_index;
901         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
902         Mpi2ReplyDescriptorsUnion_t *rpf;
903         u8 rc;
904
905         if (ioc->mask_interrupts)
906                 return IRQ_NONE;
907
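        /*
         * atomic_add_unless() only moves the busy count from 0 to 1, so a
         * reply queue that is already being drained (for example from
         * mpt3sas_base_flush_reply_queues) is not processed concurrently.
         */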
908         if (!atomic_add_unless(&reply_q->busy, 1, 1))
909                 return IRQ_NONE;
910
911         rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
912         request_desript_type = rpf->Default.ReplyFlags
913              & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
914         if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
915                 atomic_dec(&reply_q->busy);
916                 return IRQ_NONE;
917         }
918
919         completed_cmds = 0;
920         cb_idx = 0xFF;
921         do {
922                 rd.word = le64_to_cpu(rpf->Words);
923                 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
924                         goto out;
925                 reply = 0;
926                 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
927                 if (request_desript_type ==
928                     MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
929                     request_desript_type ==
930                     MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
931                         cb_idx = _base_get_cb_idx(ioc, smid);
932                         if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
933                             (likely(mpt_callbacks[cb_idx] != NULL))) {
934                                 rc = mpt_callbacks[cb_idx](ioc, smid,
935                                     msix_index, 0);
936                                 if (rc)
937                                         mpt3sas_base_free_smid(ioc, smid);
938                         }
939                 } else if (request_desript_type ==
940                     MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
941                         reply = le32_to_cpu(
942                             rpf->AddressReply.ReplyFrameAddress);
943                         if (reply > ioc->reply_dma_max_address ||
944                             reply < ioc->reply_dma_min_address)
945                                 reply = 0;
946                         if (smid) {
947                                 cb_idx = _base_get_cb_idx(ioc, smid);
948                                 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
949                                     (likely(mpt_callbacks[cb_idx] != NULL))) {
950                                         rc = mpt_callbacks[cb_idx](ioc, smid,
951                                             msix_index, reply);
952                                         if (reply)
953                                                 _base_display_reply_info(ioc,
954                                                     smid, msix_index, reply);
955                                         if (rc)
956                                                 mpt3sas_base_free_smid(ioc,
957                                                     smid);
958                                 }
959                         } else {
960                                 _base_async_event(ioc, msix_index, reply);
961                         }
962
963                         /* reply free queue handling */
964                         if (reply) {
965                                 ioc->reply_free_host_index =
966                                     (ioc->reply_free_host_index ==
967                                     (ioc->reply_free_queue_depth - 1)) ?
968                                     0 : ioc->reply_free_host_index + 1;
969                                 ioc->reply_free[ioc->reply_free_host_index] =
970                                     cpu_to_le32(reply);
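                                /*
                                 * Make the new reply free queue entry
                                 * visible before the host index write
                                 * lets the IOC reuse the reply frame.
                                 */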
971                                 wmb();
972                                 writel(ioc->reply_free_host_index,
973                                     &ioc->chip->ReplyFreeHostIndex);
974                         }
975                 }
976
977                 rpf->Words = cpu_to_le64(ULLONG_MAX);
978                 reply_q->reply_post_host_index =
979                     (reply_q->reply_post_host_index ==
980                     (ioc->reply_post_queue_depth - 1)) ? 0 :
981                     reply_q->reply_post_host_index + 1;
982                 request_desript_type =
983                     reply_q->reply_post_free[reply_q->reply_post_host_index].
984                     Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
985                 completed_cmds++;
986                 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
987                         goto out;
988                 if (!reply_q->reply_post_host_index)
989                         rpf = reply_q->reply_post_free;
990                 else
991                         rpf++;
992         } while (1);
993
994  out:
995
996         if (!completed_cmds) {
997                 atomic_dec(&reply_q->busy);
998                 return IRQ_NONE;
999         }
1000
1001         wmb();
1002         writel(reply_q->reply_post_host_index | (msix_index <<
1003             MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
1004         atomic_dec(&reply_q->busy);
1005         return IRQ_HANDLED;
1006 }
1007
1008 /**
1009  * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1010  * @ioc: per adapter object
1011  *
1012  */
1013 static inline int
1014 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1015 {
1016         return (ioc->facts.IOCCapabilities &
1017             MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1018 }
1019
1020 /**
1021  * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1022  * @ioc: per adapter object
1023  * Context: ISR context
1024  *
1025  * Called when a Task Management request has completed. We want
1026  * to flush the other reply queues so all the outstanding IO has been
1027  * completed back to OS before we process the TM completion.
1028  *
1029  * Return nothing.
1030  */
1031 void
1032 mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1033 {
1034         struct adapter_reply_queue *reply_q;
1035
1036         /* If MSIX capability is turned off
1037          * then multi-queues are not enabled
1038          */
1039         if (!_base_is_controller_msix_enabled(ioc))
1040                 return;
1041
1042         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1043                 if (ioc->shost_recovery)
1044                         return;
1045                 /* TMs are on msix_index == 0 */
1046                 if (reply_q->msix_index == 0)
1047                         continue;
1048                 _base_interrupt(reply_q->vector, (void *)reply_q);
1049         }
1050 }
1051
1052 /**
1053  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1054  * @cb_idx: callback index
1055  *
1056  * Return nothing.
1057  */
1058 void
1059 mpt3sas_base_release_callback_handler(u8 cb_idx)
1060 {
1061         mpt_callbacks[cb_idx] = NULL;
1062 }
1063
1064 /**
1065  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1066  * @cb_func: callback function
1067  *
1068  * Returns cb_idx.
1069  */
1070 u8
1071 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1072 {
1073         u8 cb_idx;
1074
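        /*
         * Scan downward for an unused slot, leaving slot 0 as the last
         * resort if every higher slot is already taken.
         */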
1075         for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1076                 if (mpt_callbacks[cb_idx] == NULL)
1077                         break;
1078
1079         mpt_callbacks[cb_idx] = cb_func;
1080         return cb_idx;
1081 }
1082
1083 /**
1084  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1085  *
1086  * Return nothing.
1087  */
1088 void
1089 mpt3sas_base_initialize_callback_handler(void)
1090 {
1091         u8 cb_idx;
1092
1093         for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1094                 mpt3sas_base_release_callback_handler(cb_idx);
1095 }
1096
1097
1098 /**
1099  * _base_build_zero_len_sge - build zero length sg entry
1100  * @ioc: per adapter object
1101  * @paddr: virtual address for SGE
1102  *
1103  * Create a zero length scatter gather entry to ensure the IOC's hardware has
1104  * something to use if the target device goes brain dead and tries
1105  * to send data even when none is asked for.
1106  *
1107  * Return nothing.
1108  */
1109 static void
1110 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1111 {
1112         u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1113             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1114             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1115             MPI2_SGE_FLAGS_SHIFT);
1116         ioc->base_add_sg_single(paddr, flags_length, -1);
1117 }
1118
1119 /**
1120  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1121  * @paddr: virtual address for SGE
1122  * @flags_length: SGE flags and data transfer length
1123  * @dma_addr: Physical address
1124  *
1125  * Return nothing.
1126  */
1127 static void
1128 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1129 {
1130         Mpi2SGESimple32_t *sgel = paddr;
1131
1132         flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1133             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1134         sgel->FlagsLength = cpu_to_le32(flags_length);
1135         sgel->Address = cpu_to_le32(dma_addr);
1136 }
1137
1138
1139 /**
1140  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1141  * @paddr: virtual address for SGE
1142  * @flags_length: SGE flags and data transfer length
1143  * @dma_addr: Physical address
1144  *
1145  * Return nothing.
1146  */
1147 static void
1148 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1149 {
1150         Mpi2SGESimple64_t *sgel = paddr;
1151
1152         flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1153             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1154         sgel->FlagsLength = cpu_to_le32(flags_length);
1155         sgel->Address = cpu_to_le64(dma_addr);
1156 }
1157
1158 /**
1159  * _base_get_chain_buffer_tracker - obtain chain tracker
1160  * @ioc: per adapter object
1161  * @smid: smid associated to an IO request
1162  *
1163  * Returns chain tracker (from ioc->free_chain_list)
1164  */
1165 static struct chain_tracker *
1166 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1167 {
1168         struct chain_tracker *chain_req;
1169         unsigned long flags;
1170
1171         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1172         if (list_empty(&ioc->free_chain_list)) {
1173                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1174                 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1175                         "chain buffers not available\n", ioc->name));
1176                 return NULL;
1177         }
1178         chain_req = list_entry(ioc->free_chain_list.next,
1179             struct chain_tracker, tracker_list);
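        /*
         * Move the tracker from the free list onto this smid's chain list
         * so it can be reclaimed when the I/O completes.
         */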
1180         list_del_init(&chain_req->tracker_list);
1181         list_add_tail(&chain_req->tracker_list,
1182             &ioc->scsi_lookup[smid - 1].chain_list);
1183         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1184         return chain_req;
1185 }
1186
1187
1188 /**
1189  * _base_build_sg - build generic sg
1190  * @ioc: per adapter object
1191  * @psge: virtual address for SGE
1192  * @data_out_dma: physical address for WRITES
1193  * @data_out_sz: data xfer size for WRITES
1194  * @data_in_dma: physical address for READS
1195  * @data_in_sz: data xfer size for READS
1196  *
1197  * Return nothing.
1198  */
1199 static void
1200 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1201         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1202         size_t data_in_sz)
1203 {
1204         u32 sgl_flags;
1205
1206         if (!data_out_sz && !data_in_sz) {
1207                 _base_build_zero_len_sge(ioc, psge);
1208                 return;
1209         }
1210
1211         if (data_out_sz && data_in_sz) {
1212                 /* WRITE sgel first */
1213                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1214                     MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1215                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1216                 ioc->base_add_sg_single(psge, sgl_flags |
1217                     data_out_sz, data_out_dma);
1218
1219                 /* incr sgel */
1220                 psge += ioc->sge_size;
1221
1222                 /* READ sgel last */
1223                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1224                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1225                     MPI2_SGE_FLAGS_END_OF_LIST);
1226                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1227                 ioc->base_add_sg_single(psge, sgl_flags |
1228                     data_in_sz, data_in_dma);
1229         } else if (data_out_sz) /* WRITE */ {
1230                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1231                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1232                     MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1233                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1234                 ioc->base_add_sg_single(psge, sgl_flags |
1235                     data_out_sz, data_out_dma);
1236         } else if (data_in_sz) /* READ */ {
1237                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1238                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1239                     MPI2_SGE_FLAGS_END_OF_LIST);
1240                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1241                 ioc->base_add_sg_single(psge, sgl_flags |
1242                     data_in_sz, data_in_dma);
1243         }
1244 }
1245
1246 /* IEEE format sgls */
1247
1248 /**
1249  * _base_add_sg_single_ieee - add sg element for IEEE format
1250  * @paddr: virtual address for SGE
1251  * @flags: SGE flags
1252  * @chain_offset: number of 128 byte elements from start of segment
1253  * @length: data transfer length
1254  * @dma_addr: Physical address
1255  *
1256  * Return nothing.
1257  */
1258 static void
1259 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1260         dma_addr_t dma_addr)
1261 {
1262         Mpi25IeeeSgeChain64_t *sgel = paddr;
1263
1264         sgel->Flags = flags;
1265         sgel->NextChainOffset = chain_offset;
1266         sgel->Length = cpu_to_le32(length);
1267         sgel->Address = cpu_to_le64(dma_addr);
1268 }
1269
1270 /**
1271  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1272  * @ioc: per adapter object
1273  * @paddr: virtual address for SGE
1274  *
1275  * Create a zero length scatter gather entry to ensure the IOC's hardware has
1276  * something to use if the target device goes brain dead and tries
1277  * to send data even when none is asked for.
1278  *
1279  * Return nothing.
1280  */
1281 static void
1282 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1283 {
1284         u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1285                 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1286                 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1287         _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1288 }
1289
1290 /**
1291  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1292  * @ioc: per adapter object
1293  * @scmd: scsi command
1294  * @smid: system request message index
1295  * Context: none.
1296  *
1297  * The main routine that builds scatter gather table from a given
1298  * scsi request sent via the .queuecommand main handler.
1299  *
1300  * Returns 0 success, anything else error
1301  */
1302 static int
1303 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1304         struct scsi_cmnd *scmd, u16 smid)
1305 {
1306         Mpi2SCSIIORequest_t *mpi_request;
1307         dma_addr_t chain_dma;
1308         struct scatterlist *sg_scmd;
1309         void *sg_local, *chain;
1310         u32 chain_offset;
1311         u32 chain_length;
1312         int sges_left;
1313         u32 sges_in_segment;
1314         u8 simple_sgl_flags;
1315         u8 simple_sgl_flags_last;
1316         u8 chain_sgl_flags;
1317         struct chain_tracker *chain_req;
1318
1319         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1320
1321         /* init scatter gather flags */
1322         simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1323             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1324         simple_sgl_flags_last = simple_sgl_flags |
1325             MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1326         chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1327             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1328
1329         sg_scmd = scsi_sglist(scmd);
1330         sges_left = scsi_dma_map(scmd);
1331         if (!sges_left) {
1332                 sdev_printk(KERN_ERR, scmd->device,
1333                         "pci_map_sg failed: request for %d bytes!\n",
1334                         scsi_bufflen(scmd));
1335                 return -ENOMEM;
1336         }
1337
1338         sg_local = &mpi_request->SGL;
1339         sges_in_segment = (ioc->request_sz -
1340             offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1341         if (sges_left <= sges_in_segment)
1342                 goto fill_in_last_segment;
1343
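        /*
         * ChainOffset is the location of the chain element within the
         * request frame, in units of ioc->sge_size_ieee: the offset of the
         * SGL plus the number of simple SGEs that precede the chain entry.
         */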
1344         mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1345             (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1346
1347         /* fill in main message segment when there is a chain following */
1348         while (sges_in_segment > 1) {
1349                 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1350                     sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1351                 sg_scmd = sg_next(sg_scmd);
1352                 sg_local += ioc->sge_size_ieee;
1353                 sges_left--;
1354                 sges_in_segment--;
1355         }
1356
1357         /* initializing the pointers */
1358         chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1359         if (!chain_req)
1360                 return -1;
1361         chain = chain_req->chain_buffer;
1362         chain_dma = chain_req->chain_buffer_dma;
1363         do {
1364                 sges_in_segment = (sges_left <=
1365                     ioc->max_sges_in_chain_message) ? sges_left :
1366                     ioc->max_sges_in_chain_message;
1367                 chain_offset = (sges_left == sges_in_segment) ?
1368                     0 : sges_in_segment;
1369                 chain_length = sges_in_segment * ioc->sge_size_ieee;
1370                 if (chain_offset)
1371                         chain_length += ioc->sge_size_ieee;
1372                 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1373                     chain_offset, chain_length, chain_dma);
1374
1375                 sg_local = chain;
1376                 if (!chain_offset)
1377                         goto fill_in_last_segment;
1378
1379                 /* fill in chain segments */
1380                 while (sges_in_segment) {
1381                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1382                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1383                         sg_scmd = sg_next(sg_scmd);
1384                         sg_local += ioc->sge_size_ieee;
1385                         sges_left--;
1386                         sges_in_segment--;
1387                 }
1388
1389                 chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1390                 if (!chain_req)
1391                         return -1;
1392                 chain = chain_req->chain_buffer;
1393                 chain_dma = chain_req->chain_buffer_dma;
1394         } while (1);
1395
1396
1397  fill_in_last_segment:
1398
1399         /* fill the last segment */
1400         while (sges_left) {
1401                 if (sges_left == 1)
1402                         _base_add_sg_single_ieee(sg_local,
1403                             simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1404                             sg_dma_address(sg_scmd));
1405                 else
1406                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1407                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1408                 sg_scmd = sg_next(sg_scmd);
1409                 sg_local += ioc->sge_size_ieee;
1410                 sges_left--;
1411         }
1412
1413         return 0;
1414 }
1415
1416 /**
1417  * _base_build_sg_ieee - build generic sg for IEEE format
1418  * @ioc: per adapter object
1419  * @psge: virtual address for SGE
1420  * @data_out_dma: physical address for WRITES
1421  * @data_out_sz: data xfer size for WRITES
1422  * @data_in_dma: physical address for READS
1423  * @data_in_sz: data xfer size for READS
1424  *
1425  * Return nothing.
1426  */
1427 static void
1428 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1429         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1430         size_t data_in_sz)
1431 {
1432         u8 sgl_flags;
1433
1434         if (!data_out_sz && !data_in_sz) {
1435                 _base_build_zero_len_sge_ieee(ioc, psge);
1436                 return;
1437         }
1438
1439         if (data_out_sz && data_in_sz) {
1440                 /* WRITE sgel first */
1441                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1442                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1443                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1444                     data_out_dma);
1445
1446                 /* incr sgel */
1447                 psge += ioc->sge_size_ieee;
1448
1449                 /* READ sgel last */
1450                 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1451                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1452                     data_in_dma);
1453         } else if (data_out_sz) /* WRITE */ {
1454                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1455                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1456                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1457                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1458                     data_out_dma);
1459         } else if (data_in_sz) /* READ */ {
1460                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1461                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1462                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1463                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1464                     data_in_dma);
1465         }
1466 }
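/*
 * Illustrative note (added): for a bidirectional request this routine lays
 * out exactly two simple IEEE SGEs back to back -- the WRITE element first,
 * then the READ element carrying MPI25_IEEE_SGE_FLAGS_END_OF_LIST -- so a
 * caller passing data_out_sz = 64 and data_in_sz = 512 ends up with two
 * simple elements of ioc->sge_size_ieee bytes each starting at psge.
 */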
1467
1468 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
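/*
 * Worked example (added for clarity): with 4 kB pages (PAGE_SHIFT == 12)
 * convert_to_kb(x) reduces to (x) << 2, so a totalram of 262144 pages is
 * reported below as 1048576 kB.
 */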
1469
1470 /**
1471  * _base_config_dma_addressing - set dma addressing
1472  * @ioc: per adapter object
1473  * @pdev: PCI device struct
1474  *
1475  * Returns 0 for success, non-zero for failure.
1476  */
1477 static int
1478 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1479 {
1480         struct sysinfo s;
1481         char *desc = NULL;
1482
1483         if (sizeof(dma_addr_t) > 4) {
1484                 const uint64_t required_mask =
1485                     dma_get_required_mask(&pdev->dev);
1486                 if ((required_mask > DMA_BIT_MASK(32)) &&
1487                     !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1488                     !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1489                         ioc->base_add_sg_single = &_base_add_sg_single_64;
1490                         ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1491                         desc = "64";
1492                         goto out;
1493                 }
1494         }
1495
1496         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1497             && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1498                 ioc->base_add_sg_single = &_base_add_sg_single_32;
1499                 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1500                 desc = "32";
1501         } else
1502                 return -ENODEV;
1503
1504  out:
1505         si_meminfo(&s);
1506         pr_info(MPT3SAS_FMT
1507                 "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1508                 ioc->name, desc, convert_to_kb(s.totalram));
1509
1510         return 0;
1511 }
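/*
 * Usage note (added, not from the original source): the probe path calls
 * this helper from mpt3sas_base_map_resources() once the PCI device has
 * been enabled.  On later kernels the paired pci_set_dma_mask()/
 * pci_set_consistent_dma_mask() calls could be collapsed into a single
 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); the split form
 * is kept here to match the rest of the file.
 */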
1512
1513 /**
1514  * _base_check_enable_msix - checks MSIX capability.
1515  * @ioc: per adapter object
1516  *
1517  * Check to see if card is capable of MSIX, and set number
1518  * of available msix vectors
1519  */
1520 static int
1521 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1522 {
1523         int base;
1524         u16 message_control;
1525
1526         base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1527         if (!base) {
1528                 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1529                         ioc->name));
1530                 return -EINVAL;
1531         }
1532
1533         /* get msix vector count */
1534
1535         pci_read_config_word(ioc->pdev, base + 2, &message_control);
1536         ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1537         if (ioc->msix_vector_count > 8)
1538                 ioc->msix_vector_count = 8;
1539         dinitprintk(ioc, pr_info(MPT3SAS_FMT
1540                 "msix is supported, vector_count(%d)\n",
1541                 ioc->name, ioc->msix_vector_count));
1542         return 0;
1543 }
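/*
 * Clarifying note (added): offset 2 from the MSI-X capability is the
 * Message Control register, whose Table Size field is encoded as N-1;
 * hence the "+ 1" above.  A raw field value of 0x007 therefore means 8
 * vectors, and anything larger is clamped to 8 by this driver.
 */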
1544
1545 /**
1546  * _base_free_irq - free irq
1547  * @ioc: per adapter object
1548  *
1549  * Frees the irq for each reply_queue and removes it from the list.
1550  */
1551 static void
1552 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1553 {
1554         struct adapter_reply_queue *reply_q, *next;
1555
1556         if (list_empty(&ioc->reply_queue_list))
1557                 return;
1558
1559         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1560                 list_del(&reply_q->list);
1561                 synchronize_irq(reply_q->vector);
1562                 free_irq(reply_q->vector, reply_q);
1563                 kfree(reply_q);
1564         }
1565 }
1566
1567 /**
1568  * _base_request_irq - request irq
1569  * @ioc: per adapter object
1570  * @index: msix index into vector table
1571  * @vector: irq vector
1572  *
1573  * Requests the irq and inserts the reply_queue into the list.
1574  */
1575 static int
1576 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1577 {
1578         struct adapter_reply_queue *reply_q;
1579         int r;
1580
1581         reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1582         if (!reply_q) {
1583                 pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1584                     ioc->name, (int)sizeof(struct adapter_reply_queue));
1585                 return -ENOMEM;
1586         }
1587         reply_q->ioc = ioc;
1588         reply_q->msix_index = index;
1589         reply_q->vector = vector;
1590         atomic_set(&reply_q->busy, 0);
1591         if (ioc->msix_enable)
1592                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1593                     MPT3SAS_DRIVER_NAME, ioc->id, index);
1594         else
1595                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1596                     MPT3SAS_DRIVER_NAME, ioc->id);
1597         r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1598             reply_q);
1599         if (r) {
1600                 pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1601                     reply_q->name, vector);
1602                 kfree(reply_q);
1603                 return -EBUSY;
1604         }
1605
1606         INIT_LIST_HEAD(&reply_q->list);
1607         list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1608         return 0;
1609 }
1610
1611 /**
1612  * _base_assign_reply_queues - assigning msix index for each cpu
1613  * @ioc: per adapter object
1614  *
1615  * The end user would need to set the affinity via /proc/irq/#/smp_affinity
1616  *
1617  * It would be nice if we could call irq_set_affinity, however it is not
1618  * an exported symbol
1619  */
1620 static void
1621 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1622 {
1623         struct adapter_reply_queue *reply_q;
1624         int cpu_id;
1625         int cpu_grouping, loop, grouping, grouping_mod;
1626         int reply_queue;
1627
1628         if (!_base_is_controller_msix_enabled(ioc))
1629                 return;
1630
1631         memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1632
1633         /* NUMA Hardware bug workaround - drop to fewer reply queues */
1634         if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
1635                 ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
1636                 reply_queue = 0;
1637                 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1638                         reply_q->msix_index = reply_queue;
1639                         if (++reply_queue == ioc->reply_queue_count)
1640                                 reply_queue = 0;
1641                 }
1642         }
1643
1644         /* when there are more cpus than available msix vectors,
1645          * then group cpus together on the same irq
1646          */
1647         if (ioc->cpu_count > ioc->msix_vector_count) {
1648                 grouping = ioc->cpu_count / ioc->msix_vector_count;
1649                 grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1650                 if (grouping < 2 || (grouping == 2 && !grouping_mod))
1651                         cpu_grouping = 2;
1652                 else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1653                         cpu_grouping = 4;
1654                 else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1655                         cpu_grouping = 8;
1656                 else
1657                         cpu_grouping = 16;
1658         } else
1659                 cpu_grouping = 0;
1660
1661         loop = 0;
1662         reply_q = list_entry(ioc->reply_queue_list.next,
1663              struct adapter_reply_queue, list);
1664         for_each_online_cpu(cpu_id) {
1665                 if (!cpu_grouping) {
1666                         ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1667                         reply_q = list_entry(reply_q->list.next,
1668                             struct adapter_reply_queue, list);
1669                 } else {
1670                         if (loop < cpu_grouping) {
1671                                 ioc->cpu_msix_table[cpu_id] =
1672                                     reply_q->msix_index;
1673                                 loop++;
1674                         } else {
1675                                 reply_q = list_entry(reply_q->list.next,
1676                                     struct adapter_reply_queue, list);
1677                                 ioc->cpu_msix_table[cpu_id] =
1678                                     reply_q->msix_index;
1679                                 loop = 1;
1680                         }
1681                 }
1682         }
1683 }
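/*
 * Worked example (added for clarity): with 16 online CPUs and 8 MSI-X
 * vectors, grouping = 2 and grouping_mod = 0, so cpu_grouping = 2 and the
 * loop above maps CPUs 0-1 to the first reply queue, CPUs 2-3 to the
 * second, and so on; with at least as many vectors as CPUs the mapping is
 * simply one reply queue per CPU.
 */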
1684
1685 /**
1686  * _base_disable_msix - disables msix
1687  * @ioc: per adapter object
1688  *
1689  */
1690 static void
1691 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1692 {
1693         if (!ioc->msix_enable)
1694                 return;
1695         pci_disable_msix(ioc->pdev);
1696         ioc->msix_enable = 0;
1697 }
1698
1699 /**
1700  * _base_enable_msix - enables msix, fallback to io_apic
1701  * @ioc: per adapter object
1702  *
1703  */
1704 static int
1705 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1706 {
1707         struct msix_entry *entries, *a;
1708         int r;
1709         int i;
1710         u8 try_msix = 0;
1711
1712         INIT_LIST_HEAD(&ioc->reply_queue_list);
1713
1714         if (msix_disable == -1 || msix_disable == 0)
1715                 try_msix = 1;
1716
1717         if (!try_msix)
1718                 goto try_ioapic;
1719
1720         if (_base_check_enable_msix(ioc) != 0)
1721                 goto try_ioapic;
1722
1723         ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1724             ioc->msix_vector_count);
1725
1726         entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1727             GFP_KERNEL);
1728         if (!entries) {
1729                 dfailprintk(ioc, pr_info(MPT3SAS_FMT
1730                         "kcalloc failed @ at %s:%d/%s() !!!\n",
1731                         ioc->name, __FILE__, __LINE__, __func__));
1732                 goto try_ioapic;
1733         }
1734
1735         for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1736                 a->entry = i;
1737
1738         r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1739         if (r) {
1740                 dfailprintk(ioc, pr_info(MPT3SAS_FMT
1741                         "pci_enable_msix failed (r=%d) !!!\n",
1742                         ioc->name, r));
1743                 kfree(entries);
1744                 goto try_ioapic;
1745         }
1746
1747         ioc->msix_enable = 1;
1748         for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1749                 r = _base_request_irq(ioc, i, a->vector);
1750                 if (r) {
1751                         _base_free_irq(ioc);
1752                         _base_disable_msix(ioc);
1753                         kfree(entries);
1754                         goto try_ioapic;
1755                 }
1756         }
1757
1758         kfree(entries);
1759         return 0;
1760
1761 /* fallback to io_apic interrupt routing */
1762  try_ioapic:
1763
1764         r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1765
1766         return r;
1767 }
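/*
 * Note (added for clarity): any non-zero return from pci_enable_msix() --
 * including the positive "fewer vectors available" return -- is treated as
 * failure here, and the driver falls back to a single legacy interrupt on
 * pdev->irq via the try_ioapic path rather than retrying with a smaller
 * vector count; later kernels provide pci_enable_msix_range() and
 * pci_alloc_irq_vectors() for that.
 */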
1768
1769 /**
1770  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
1771  * @ioc: per adapter object
1772  *
1773  * Returns 0 for success, non-zero for failure.
1774  */
1775 int
1776 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1777 {
1778         struct pci_dev *pdev = ioc->pdev;
1779         u32 memap_sz;
1780         u32 pio_sz;
1781         int i, r = 0;
1782         u64 pio_chip = 0;
1783         u64 chip_phys = 0;
1784         struct adapter_reply_queue *reply_q;
1785
1786         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
1787             ioc->name, __func__));
1788
1789         ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1790         if (pci_enable_device_mem(pdev)) {
1791                 pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
1792                         ioc->name);
1793                 return -ENODEV;
1794         }
1795
1796
1797         if (pci_request_selected_regions(pdev, ioc->bars,
1798             MPT3SAS_DRIVER_NAME)) {
1799                 pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
1800                         ioc->name);
1801                 r = -ENODEV;
1802                 goto out_fail;
1803         }
1804
1805 /* AER (Advanced Error Reporting) hooks */
1806         pci_enable_pcie_error_reporting(pdev);
1807
1808         pci_set_master(pdev);
1809
1810
1811         if (_base_config_dma_addressing(ioc, pdev) != 0) {
1812                 pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
1813                     ioc->name, pci_name(pdev));
1814                 r = -ENODEV;
1815                 goto out_fail;
1816         }
1817
1818         for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1819                 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1820                         if (pio_sz)
1821                                 continue;
1822                         pio_chip = (u64)pci_resource_start(pdev, i);
1823                         pio_sz = pci_resource_len(pdev, i);
1824                 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1825                         if (memap_sz)
1826                                 continue;
1827                         ioc->chip_phys = pci_resource_start(pdev, i);
1828                         chip_phys = (u64)ioc->chip_phys;
1829                         memap_sz = pci_resource_len(pdev, i);
1830                         ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1831                         if (ioc->chip == NULL) {
1832                                 pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
1833                                         ioc->name);
1834                                 r = -EINVAL;
1835                                 goto out_fail;
1836                         }
1837                 }
1838         }
1839
1840         _base_mask_interrupts(ioc);
1841         r = _base_enable_msix(ioc);
1842         if (r)
1843                 goto out_fail;
1844
1845         list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1846                 pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
1847                     reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1848                     "IO-APIC enabled"), reply_q->vector);
1849
1850         pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1851             ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1852         pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
1853             ioc->name, (unsigned long long)pio_chip, pio_sz);
1854
1855         /* Save PCI configuration state for recovery from PCI AER/EEH errors */
1856         pci_save_state(pdev);
1857         return 0;
1858
1859  out_fail:
1860         if (ioc->chip_phys)
1861                 iounmap(ioc->chip);
1862         ioc->chip_phys = 0;
1863         pci_release_selected_regions(ioc->pdev, ioc->bars);
1864         pci_disable_pcie_error_reporting(pdev);
1865         pci_disable_device(pdev);
1866         return r;
1867 }
1868
1869 /**
1870  * mpt3sas_base_get_msg_frame - obtain request mf pointer
1871  * @ioc: per adapter object
1872  * @smid: system request message index (smid zero is invalid)
1873  *
1874  * Returns virt pointer to message frame.
1875  */
1876 void *
1877 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1878 {
1879         return (void *)(ioc->request + (smid * ioc->request_sz));
1880 }
1881
1882 /**
1883  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
1884  * @ioc: per adapter object
1885  * @smid: system request message index
1886  *
1887  * Returns virt pointer to sense buffer.
1888  */
1889 void *
1890 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1891 {
1892         return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1893 }
1894
1895 /**
1896  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
1897  * @ioc: per adapter object
1898  * @smid: system request message index
1899  *
1900  * Returns the lower 32 bits of the sense buffer's physical address.
1901  */
1902 __le32
1903 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1904 {
1905         return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
1906             SCSI_SENSE_BUFFERSIZE));
1907 }
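/*
 * Worked example (illustrative only): with request_sz == 128, smid 3 maps
 * to the frame at ioc->request + 384, while its sense buffer sits at
 * ioc->sense + 2 * SCSI_SENSE_BUFFERSIZE; smid 0 is reserved, which is why
 * the sense/sense_dma helpers index with (smid - 1).
 */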
1908
1909 /**
1910  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
1911  * @ioc: per adapter object
1912  * @phys_addr: lower 32 bits of the reply's physical addr
1913  *
1914  * Converts the lower 32-bit physical addr into a virt address.
1915  */
1916 void *
1917 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
1918 {
1919         if (!phys_addr)
1920                 return NULL;
1921         return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1922 }
1923
1924 /**
1925  * mpt3sas_base_get_smid - obtain a free smid from internal queue
1926  * @ioc: per adapter object
1927  * @cb_idx: callback index
1928  *
1929  * Returns smid (zero is invalid)
1930  */
1931 u16
1932 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
1933 {
1934         unsigned long flags;
1935         struct request_tracker *request;
1936         u16 smid;
1937
1938         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1939         if (list_empty(&ioc->internal_free_list)) {
1940                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1941                 pr_err(MPT3SAS_FMT "%s: smid not available\n",
1942                     ioc->name, __func__);
1943                 return 0;
1944         }
1945
1946         request = list_entry(ioc->internal_free_list.next,
1947             struct request_tracker, tracker_list);
1948         request->cb_idx = cb_idx;
1949         smid = request->smid;
1950         list_del(&request->tracker_list);
1951         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1952         return smid;
1953 }
1954
1955 /**
1956  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1957  * @ioc: per adapter object
1958  * @cb_idx: callback index
1959  * @scmd: pointer to scsi command object
1960  *
1961  * Returns smid (zero is invalid)
1962  */
1963 u16
1964 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
1965         struct scsi_cmnd *scmd)
1966 {
1967         unsigned long flags;
1968         struct scsiio_tracker *request;
1969         u16 smid;
1970
1971         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1972         if (list_empty(&ioc->free_list)) {
1973                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1974                 pr_err(MPT3SAS_FMT "%s: smid not available\n",
1975                     ioc->name, __func__);
1976                 return 0;
1977         }
1978
1979         request = list_entry(ioc->free_list.next,
1980             struct scsiio_tracker, tracker_list);
1981         request->scmd = scmd;
1982         request->cb_idx = cb_idx;
1983         smid = request->smid;
1984         list_del(&request->tracker_list);
1985         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1986         return smid;
1987 }
1988
1989 /**
1990  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1991  * @ioc: per adapter object
1992  * @cb_idx: callback index
1993  *
1994  * Returns smid (zero is invalid)
1995  */
1996 u16
1997 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
1998 {
1999         unsigned long flags;
2000         struct request_tracker *request;
2001         u16 smid;
2002
2003         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2004         if (list_empty(&ioc->hpr_free_list)) {
2005                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2006                 return 0;
2007         }
2008
2009         request = list_entry(ioc->hpr_free_list.next,
2010             struct request_tracker, tracker_list);
2011         request->cb_idx = cb_idx;
2012         smid = request->smid;
2013         list_del(&request->tracker_list);
2014         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2015         return smid;
2016 }
2017
2018 /**
2019  * mpt3sas_base_free_smid - put smid back on free_list
2020  * @ioc: per adapter object
2021  * @smid: system request message index
2022  *
2023  * Return nothing.
2024  */
2025 void
2026 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2027 {
2028         unsigned long flags;
2029         int i;
2030         struct chain_tracker *chain_req, *next;
2031
2032         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2033         if (smid < ioc->hi_priority_smid) {
2034                 /* scsiio queue */
2035                 i = smid - 1;
2036                 if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2037                         list_for_each_entry_safe(chain_req, next,
2038                             &ioc->scsi_lookup[i].chain_list, tracker_list) {
2039                                 list_del_init(&chain_req->tracker_list);
2040                                 list_add(&chain_req->tracker_list,
2041                                     &ioc->free_chain_list);
2042                         }
2043                 }
2044                 ioc->scsi_lookup[i].cb_idx = 0xFF;
2045                 ioc->scsi_lookup[i].scmd = NULL;
2046                 list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2047                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2048
2049                 /*
2050                  * See _wait_for_commands_to_complete() call with regards
2051                  * to this code.
2052                  */
2053                 if (ioc->shost_recovery && ioc->pending_io_count) {
2054                         if (ioc->pending_io_count == 1)
2055                                 wake_up(&ioc->reset_wq);
2056                         ioc->pending_io_count--;
2057                 }
2058                 return;
2059         } else if (smid < ioc->internal_smid) {
2060                 /* hi-priority */
2061                 i = smid - ioc->hi_priority_smid;
2062                 ioc->hpr_lookup[i].cb_idx = 0xFF;
2063                 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2064         } else if (smid <= ioc->hba_queue_depth) {
2065                 /* internal queue */
2066                 i = smid - ioc->internal_smid;
2067                 ioc->internal_lookup[i].cb_idx = 0xFF;
2068                 list_add(&ioc->internal_lookup[i].tracker_list,
2069                     &ioc->internal_free_list);
2070         }
2071         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2072 }
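/*
 * Layout reminder (added for clarity): smids are partitioned into three
 * contiguous ranges -- [1, hi_priority_smid) for SCSI IO,
 * [hi_priority_smid, internal_smid) for hi-priority requests and
 * [internal_smid, hba_queue_depth] for internal commands -- which is what
 * the range checks above key off when returning a smid to its free list.
 */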
2073
2074 /**
2075  * _base_writeq - 64 bit write to MMIO
2076  * @ioc: per adapter object
2077  * @b: data payload
2078  * @addr: address in MMIO space
2079  * @writeq_lock: spin lock
2080  *
2081  * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
2082  * care of 32 bit environments where it is not guaranteed that the entire word
2083  * is sent in one transfer.
2084  */
2085 #if defined(writeq) && defined(CONFIG_64BIT)
2086 static inline void
2087 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2088 {
2089         writeq(cpu_to_le64(b), addr);
2090 }
2091 #else
2092 static inline void
2093 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2094 {
2095         unsigned long flags;
2096         __u64 data_out = cpu_to_le64(b);
2097
2098         spin_lock_irqsave(writeq_lock, flags);
2099         writel((u32)(data_out), addr);
2100         writel((u32)(data_out >> 32), (addr + 4));
2101         spin_unlock_irqrestore(writeq_lock, flags);
2102 }
2103 #endif
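/*
 * Clarifying note (added): on 32-bit builds the request descriptor is
 * posted as two 32-bit writes, so without writeq_lock two CPUs posting
 * concurrently could interleave their halves and the controller would see
 * a torn descriptor; the spinlock serializes the low/high pair.
 */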
2104
2105 static inline u8
2106 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2107 {
2108         return ioc->cpu_msix_table[raw_smp_processor_id()];
2109 }
2110
2111 /**
2112  * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2113  * @ioc: per adapter object
2114  * @smid: system request message index
2115  * @handle: device handle
2116  *
2117  * Return nothing.
2118  */
2119 void
2120 mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2121 {
2122         Mpi2RequestDescriptorUnion_t descriptor;
2123         u64 *request = (u64 *)&descriptor;
2124
2125
2126         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2127         descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
2128         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2129         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2130         descriptor.SCSIIO.LMID = 0;
2131         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2132             &ioc->scsi_lookup_lock);
2133 }
2134
2135 /**
2136  * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2137  * @ioc: per adapter object
2138  * @smid: system request message index
2139  * @handle: device handle
2140  *
2141  * Return nothing.
2142  */
2143 void
2144 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2145         u16 handle)
2146 {
2147         Mpi2RequestDescriptorUnion_t descriptor;
2148         u64 *request = (u64 *)&descriptor;
2149
2150         descriptor.SCSIIO.RequestFlags =
2151             MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2152         descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2153         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2154         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2155         descriptor.SCSIIO.LMID = 0;
2156         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2157             &ioc->scsi_lookup_lock);
2158 }
2159
2160 /**
2161  * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
2162  * @ioc: per adapter object
2163  * @smid: system request message index
2164  *
2165  * Return nothing.
2166  */
2167 void
2168 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2169 {
2170         Mpi2RequestDescriptorUnion_t descriptor;
2171         u64 *request = (u64 *)&descriptor;
2172
2173         descriptor.HighPriority.RequestFlags =
2174             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2175         descriptor.HighPriority.MSIxIndex =  0;
2176         descriptor.HighPriority.SMID = cpu_to_le16(smid);
2177         descriptor.HighPriority.LMID = 0;
2178         descriptor.HighPriority.Reserved1 = 0;
2179         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2180             &ioc->scsi_lookup_lock);
2181 }
2182
2183 /**
2184  * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2185  * @ioc: per adapter object
2186  * @smid: system request message index
2187  *
2188  * Return nothing.
2189  */
2190 void
2191 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2192 {
2193         Mpi2RequestDescriptorUnion_t descriptor;
2194         u64 *request = (u64 *)&descriptor;
2195
2196         descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2197         descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
2198         descriptor.Default.SMID = cpu_to_le16(smid);
2199         descriptor.Default.LMID = 0;
2200         descriptor.Default.DescriptorTypeDependent = 0;
2201         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2202             &ioc->scsi_lookup_lock);
2203 }
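/*
 * Illustrative sketch (not from the original source): all four put_smid
 * helpers above build a 64-bit Mpi2RequestDescriptorUnion_t on the stack
 * and post it with a single atomic write.  A caller issuing, say, a config
 * request would do roughly:
 *
 *	smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
 *	...fill the frame from mpt3sas_base_get_msg_frame(ioc, smid)...
 *	mpt3sas_base_put_smid_default(ioc, smid);
 *
 * (ioc->config_cb_idx is named here for illustration as the callback index
 * registered by the config module.)
 */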
2204
2205
2206
2207 /**
2208  * _base_display_ioc_capabilities - Display IOC's capabilities.
2209  * @ioc: per adapter object
2210  *
2211  * Return nothing.
2212  */
2213 static void
2214 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2215 {
2216         int i = 0;
2217         char desc[16];
2218         u32 iounit_pg1_flags;
2219         u32 bios_version;
2220
2221         bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2222         strncpy(desc, ioc->manu_pg0.ChipName, 16);
2223         pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2224            "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2225             ioc->name, desc,
2226            (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2227            (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2228            (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2229            ioc->facts.FWVersion.Word & 0x000000FF,
2230            ioc->pdev->revision,
2231            (bios_version & 0xFF000000) >> 24,
2232            (bios_version & 0x00FF0000) >> 16,
2233            (bios_version & 0x0000FF00) >> 8,
2234             bios_version & 0x000000FF);
2235
2236         pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2237
2238         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2239                 pr_info("Initiator");
2240                 i++;
2241         }
2242
2243         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2244                 pr_info("%sTarget", i ? "," : "");
2245                 i++;
2246         }
2247
2248         i = 0;
2249         pr_info("), ");
2250         pr_info("Capabilities=(");
2251
2252         if (ioc->facts.IOCCapabilities &
2253                     MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2254                         pr_info("Raid");
2255                         i++;
2256         }
2257
2258         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2259                 pr_info("%sTLR", i ? "," : "");
2260                 i++;
2261         }
2262
2263         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2264                 pr_info("%sMulticast", i ? "," : "");
2265                 i++;
2266         }
2267
2268         if (ioc->facts.IOCCapabilities &
2269             MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2270                 pr_info("%sBIDI Target", i ? "," : "");
2271                 i++;
2272         }
2273
2274         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2275                 pr_info("%sEEDP", i ? "," : "");
2276                 i++;
2277         }
2278
2279         if (ioc->facts.IOCCapabilities &
2280             MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2281                 pr_info("%sSnapshot Buffer", i ? "," : "");
2282                 i++;
2283         }
2284
2285         if (ioc->facts.IOCCapabilities &
2286             MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2287                 pr_info("%sDiag Trace Buffer", i ? "," : "");
2288                 i++;
2289         }
2290
2291         if (ioc->facts.IOCCapabilities &
2292             MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2293                 pr_info("%sDiag Extended Buffer", i ? "," : "");
2294                 i++;
2295         }
2296
2297         if (ioc->facts.IOCCapabilities &
2298             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2299                 pr_info("%sTask Set Full", i ? "," : "");
2300                 i++;
2301         }
2302
2303         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2304         if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2305                 pr_info("%sNCQ", i ? "," : "");
2306                 i++;
2307         }
2308
2309         pr_info(")\n");
2310 }
2311
2312 /**
2313  * mpt3sas_base_update_missing_delay - change the missing delay timers
2314  * @ioc: per adapter object
2315  * @device_missing_delay: amount of time till device is reported missing
2316  * @io_missing_delay: interval IO is returned when there is a missing device
2317  *
2318  * Return nothing.
2319  *
2320  * Passed on the command line, this function will modify the device missing
2321  * delay, as well as the io missing delay. This should be called at driver
2322  * load time.
2323  */
2324 void
2325 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2326         u16 device_missing_delay, u8 io_missing_delay)
2327 {
2328         u16 dmd, dmd_new, dmd_orignal;
2329         u8 io_missing_delay_original;
2330         u16 sz;
2331         Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2332         Mpi2ConfigReply_t mpi_reply;
2333         u8 num_phys = 0;
2334         u16 ioc_status;
2335
2336         mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2337         if (!num_phys)
2338                 return;
2339
2340         sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2341             sizeof(Mpi2SasIOUnit1PhyData_t));
2342         sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2343         if (!sas_iounit_pg1) {
2344                 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2345                     ioc->name, __FILE__, __LINE__, __func__);
2346                 goto out;
2347         }
2348         if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2349             sas_iounit_pg1, sz))) {
2350                 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2351                     ioc->name, __FILE__, __LINE__, __func__);
2352                 goto out;
2353         }
2354         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2355             MPI2_IOCSTATUS_MASK;
2356         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2357                 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2358                     ioc->name, __FILE__, __LINE__, __func__);
2359                 goto out;
2360         }
2361
2362         /* device missing delay */
2363         dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2364         if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2365                 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2366         else
2367                 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2368         dmd_orignal = dmd;
2369         if (device_missing_delay > 0x7F) {
2370                 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2371                     device_missing_delay;
2372                 dmd = dmd / 16;
2373                 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2374         } else
2375                 dmd = device_missing_delay;
2376         sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2377
2378         /* io missing delay */
2379         io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2380         sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2381
2382         if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2383             sz)) {
2384                 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2385                         dmd_new = (dmd &
2386                             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2387                 else
2388                         dmd_new =
2389                     dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2390                 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
2391                         ioc->name, dmd_orignal, dmd_new);
2392                 pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
2393                         ioc->name, io_missing_delay_original,
2394                     io_missing_delay);
2395                 ioc->device_missing_delay = dmd_new;
2396                 ioc->io_missing_delay = io_missing_delay;
2397         }
2398
2399 out:
2400         kfree(sas_iounit_pg1);
2401 }
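/*
 * Worked example (added for clarity): the ReportDeviceMissingDelay field
 * only holds 7 bits, so a requested delay of 300 seconds is stored as
 * 300 / 16 = 18 with MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, i.e. the
 * firmware rounds it to 288 seconds; requests above 0x7F0 (2032 s) are
 * clamped first.
 */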
2402 /**
2403  * _base_static_config_pages - static start of day config pages
2404  * @ioc: per adapter object
2405  *
2406  * Return nothing.
2407  */
2408 static void
2409 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
2410 {
2411         Mpi2ConfigReply_t mpi_reply;
2412         u32 iounit_pg1_flags;
2413
2414         mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2415         if (ioc->ir_firmware)
2416                 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2417                     &ioc->manu_pg10);
2418
2419         /*
2420          * Ensure correct T10 PI operation if vendor left EEDPTagMode
2421          * flag unset in NVDATA.
2422          */
2423         mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
2424         if (ioc->manu_pg11.EEDPTagMode == 0) {
2425                 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
2426                     ioc->name);
2427                 ioc->manu_pg11.EEDPTagMode &= ~0x3;
2428                 ioc->manu_pg11.EEDPTagMode |= 0x1;
2429                 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
2430                     &ioc->manu_pg11);
2431         }
2432
2433         mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2434         mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2435         mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2436         mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2437         mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2438         _base_display_ioc_capabilities(ioc);
2439
2440         /*
2441          * Enable task_set_full handling in iounit_pg1 when the
2442          * facts capabilities indicate that it is supported.
2443          */
2444         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2445         if ((ioc->facts.IOCCapabilities &
2446             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2447                 iounit_pg1_flags &=
2448                     ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2449         else
2450                 iounit_pg1_flags |=
2451                     MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2452         ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2453         mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2454 }
2455
2456 /**
2457  * _base_release_memory_pools - release memory
2458  * @ioc: per adapter object
2459  *
2460  * Free memory allocated from _base_allocate_memory_pools.
2461  *
2462  * Return nothing.
2463  */
2464 static void
2465 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2466 {
2467         int i;
2468
2469         dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2470             __func__));
2471
2472         if (ioc->request) {
2473                 pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2474                     ioc->request,  ioc->request_dma);
2475                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2476                         "request_pool(0x%p): free\n",
2477                         ioc->name, ioc->request));
2478                 ioc->request = NULL;
2479         }
2480
2481         if (ioc->sense) {
2482                 pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2483                 if (ioc->sense_dma_pool)
2484                         pci_pool_destroy(ioc->sense_dma_pool);
2485                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2486                         "sense_pool(0x%p): free\n",
2487                         ioc->name, ioc->sense));
2488                 ioc->sense = NULL;
2489         }
2490
2491         if (ioc->reply) {
2492                 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2493                 if (ioc->reply_dma_pool)
2494                         pci_pool_destroy(ioc->reply_dma_pool);
2495                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2496                         "reply_pool(0x%p): free\n",
2497                         ioc->name, ioc->reply));
2498                 ioc->reply = NULL;
2499         }
2500
2501         if (ioc->reply_free) {
2502                 pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2503                     ioc->reply_free_dma);
2504                 if (ioc->reply_free_dma_pool)
2505                         pci_pool_destroy(ioc->reply_free_dma_pool);
2506                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2507                         "reply_free_pool(0x%p): free\n",
2508                         ioc->name, ioc->reply_free));
2509                 ioc->reply_free = NULL;
2510         }
2511
2512         if (ioc->reply_post_free) {
2513                 pci_pool_free(ioc->reply_post_free_dma_pool,
2514                     ioc->reply_post_free, ioc->reply_post_free_dma);
2515                 if (ioc->reply_post_free_dma_pool)
2516                         pci_pool_destroy(ioc->reply_post_free_dma_pool);
2517                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2518                     "reply_post_free_pool(0x%p): free\n", ioc->name,
2519                     ioc->reply_post_free));
2520                 ioc->reply_post_free = NULL;
2521         }
2522
2523         if (ioc->config_page) {
2524                 dexitprintk(ioc, pr_info(MPT3SAS_FMT
2525                     "config_page(0x%p): free\n", ioc->name,
2526                     ioc->config_page));
2527                 pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2528                     ioc->config_page, ioc->config_page_dma);
2529         }
2530
2531         if (ioc->scsi_lookup) {
2532                 free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2533                 ioc->scsi_lookup = NULL;
2534         }
2535         kfree(ioc->hpr_lookup);
2536         kfree(ioc->internal_lookup);
2537         if (ioc->chain_lookup) {
2538                 for (i = 0; i < ioc->chain_depth; i++) {
2539                         if (ioc->chain_lookup[i].chain_buffer)
2540                                 pci_pool_free(ioc->chain_dma_pool,
2541                                     ioc->chain_lookup[i].chain_buffer,
2542                                     ioc->chain_lookup[i].chain_buffer_dma);
2543                 }
2544                 if (ioc->chain_dma_pool)
2545                         pci_pool_destroy(ioc->chain_dma_pool);
2546                 free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2547                 ioc->chain_lookup = NULL;
2548         }
2549 }
2550
2551 /**
2552  * _base_allocate_memory_pools - allocate start of day memory pools
2553  * @ioc: per adapter object
2554  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2555  *
2556  * Returns 0 success, anything else error
2557  */
2558 static int
2559 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
2560 {
2561         struct mpt3sas_facts *facts;
2562         u16 max_sge_elements;
2563         u16 chains_needed_per_io;
2564         u32 sz, total_sz, reply_post_free_sz;
2565         u32 retry_sz;
2566         u16 max_request_credit;
2567         unsigned short sg_tablesize;
2568         u16 sge_size;
2569         int i;
2570
2571         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2572             __func__));
2573
2574
2575         retry_sz = 0;
2576         facts = &ioc->facts;
2577
2578         /* command line tunables for max sgl entries */
2579         if (max_sgl_entries != -1)
2580                 sg_tablesize = max_sgl_entries;
2581         else
2582                 sg_tablesize = MPT3SAS_SG_DEPTH;
2583
2584         if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
2585                 sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
2586         else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
2587                 sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
2588         ioc->shost->sg_tablesize = sg_tablesize;
2589
2590         ioc->hi_priority_depth = facts->HighPriorityCredit;
2591         ioc->internal_depth = ioc->hi_priority_depth + (5);
2592         /* command line tunables  for max controller queue depth */
2593         if (max_queue_depth != -1 && max_queue_depth != 0) {
2594                 max_request_credit = min_t(u16, max_queue_depth +
2595                     ioc->hi_priority_depth + ioc->internal_depth,
2596                     facts->RequestCredit);
2597                 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2598                         max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2599         } else
2600                 max_request_credit = min_t(u16, facts->RequestCredit,
2601                     MAX_HBA_QUEUE_DEPTH);
2602
2603         ioc->hba_queue_depth = max_request_credit;
2604
2605         /* request frame size */
2606         ioc->request_sz = facts->IOCRequestFrameSize * 4;
2607
2608         /* reply frame size */
2609         ioc->reply_sz = facts->ReplyFrameSize * 4;
2610
2611         /* calculate the max scatter element size */
2612         sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
2613
2614  retry_allocation:
2615         total_sz = 0;
2616         /* calculate number of sg elements left over in the 1st frame */
2617         max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2618             sizeof(Mpi2SGEIOUnion_t)) + sge_size);
2619         ioc->max_sges_in_main_message = max_sge_elements/sge_size;
2620
2621         /* now do the same for a chain buffer */
2622         max_sge_elements = ioc->request_sz - sge_size;
2623         ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
2624
2625         /*
2626          *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2627          */
2628         chains_needed_per_io = ((ioc->shost->sg_tablesize -
2629            ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2630             + 1;
2631         if (chains_needed_per_io > facts->MaxChainDepth) {
2632                 chains_needed_per_io = facts->MaxChainDepth;
2633                 ioc->shost->sg_tablesize = min_t(u16,
2634                 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2635                 * chains_needed_per_io), ioc->shost->sg_tablesize);
2636         }
2637         ioc->chains_needed_per_io = chains_needed_per_io;
2638
2639         /* reply free queue sizing - taking into account for 64 FW events */
2640         ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2641
2642         /* calculate reply descriptor post queue depth */
2643         ioc->reply_post_queue_depth = ioc->hba_queue_depth +
2644                                 ioc->reply_free_queue_depth +  1 ;
2645         /* align the reply post queue on the next 16 count boundary */
2646         if (ioc->reply_post_queue_depth % 16)
2647                 ioc->reply_post_queue_depth += 16 -
2648                 (ioc->reply_post_queue_depth % 16);
2649
2650
2651         if (ioc->reply_post_queue_depth >
2652             facts->MaxReplyDescriptorPostQueueDepth) {
2653                 ioc->reply_post_queue_depth =
2654                                 facts->MaxReplyDescriptorPostQueueDepth -
2655                     (facts->MaxReplyDescriptorPostQueueDepth % 16);
2656                 ioc->hba_queue_depth =
2657                                 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
2658                 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2659         }
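        /*
         * Worked example (added for clarity): with hba_queue_depth = 1000 the
         * reply free queue holds 1064 entries and the raw post queue depth is
         * 1000 + 1064 + 1 = 2065, which the alignment above rounds up to 2080
         * (the next multiple of 16); only if that exceeded the firmware's
         * MaxReplyDescriptorPostQueueDepth would the HBA depth be scaled back.
         */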
2660
2661         dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
2662             "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2663             "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2664             ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2665             ioc->chains_needed_per_io));
2666
2667         ioc->scsiio_depth = ioc->hba_queue_depth -
2668             ioc->hi_priority_depth - ioc->internal_depth;
2669
2670         /* set the scsi host can_queue depth
2671          * with some internal commands that could be outstanding
2672          */
2673         ioc->shost->can_queue = ioc->scsiio_depth;
2674         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2675                 "scsi host: can_queue depth (%d)\n",
2676                 ioc->name, ioc->shost->can_queue));
2677
2678
2679         /* contiguous pool for request and chains, 16 byte align, one extra
2680          * frame for smid=0
2681          */
2682         ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2683         sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2684
2685         /* hi-priority queue */
2686         sz += (ioc->hi_priority_depth * ioc->request_sz);
2687
2688         /* internal queue */
2689         sz += (ioc->internal_depth * ioc->request_sz);
2690
2691         ioc->request_dma_sz = sz;
2692         ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2693         if (!ioc->request) {
2694                 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
2695                     "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2696                     "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2697                     ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2698                 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
2699                         goto out;
2700                 retry_sz += 64;
2701                 ioc->hba_queue_depth = max_request_credit - retry_sz;
2702                 goto retry_allocation;
2703         }
2704
2705         if (retry_sz)
2706                 pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
2707                     "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2708                     "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2709                     ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2710
2711         /* hi-priority queue */
2712         ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2713             ioc->request_sz);
2714         ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2715             ioc->request_sz);
2716
2717         /* internal queue */
2718         ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2719             ioc->request_sz);
2720         ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2721             ioc->request_sz);
2722
2723         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2724                 "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
2725                 ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2726             (ioc->hba_queue_depth * ioc->request_sz)/1024));
2727
2728         dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
2729             ioc->name, (unsigned long long) ioc->request_dma));
2730         total_sz += sz;
2731
2732         sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2733         ioc->scsi_lookup_pages = get_order(sz);
2734         ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2735             GFP_KERNEL, ioc->scsi_lookup_pages);
2736         if (!ioc->scsi_lookup) {
2737                 pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
2738                         ioc->name, (int)sz);
2739                 goto out;
2740         }
2741
2742         dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
2743                 ioc->name, ioc->request, ioc->scsiio_depth));
2744
2745         ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2746         sz = ioc->chain_depth * sizeof(struct chain_tracker);
2747         ioc->chain_pages = get_order(sz);
2748         ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2749             GFP_KERNEL, ioc->chain_pages);
2750         if (!ioc->chain_lookup) {
2751                 pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
2752                         ioc->name);
2753                 goto out;
2754         }
2755         ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2756             ioc->request_sz, 16, 0);
2757         if (!ioc->chain_dma_pool) {
2758                 pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
2759                         ioc->name);
2760                 goto out;
2761         }
2762         for (i = 0; i < ioc->chain_depth; i++) {
2763                 ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2764                     ioc->chain_dma_pool , GFP_KERNEL,
2765                     &ioc->chain_lookup[i].chain_buffer_dma);
2766                 if (!ioc->chain_lookup[i].chain_buffer) {
2767                         ioc->chain_depth = i;
2768                         goto chain_done;
2769                 }
2770                 total_sz += ioc->request_sz;
2771         }
2772  chain_done:
2773         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2774                 "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
2775                 ioc->name, ioc->chain_depth, ioc->request_sz,
2776                 ((ioc->chain_depth *  ioc->request_sz))/1024));
2777
2778         /* initialize hi-priority queue smid's */
2779         ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2780             sizeof(struct request_tracker), GFP_KERNEL);
2781         if (!ioc->hpr_lookup) {
2782                 pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
2783                     ioc->name);
2784                 goto out;
2785         }
2786         ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2787         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2788                 "hi_priority(0x%p): depth(%d), start smid(%d)\n",
2789                 ioc->name, ioc->hi_priority,
2790             ioc->hi_priority_depth, ioc->hi_priority_smid));
2791
2792         /* initialize internal queue smid's */
2793         ioc->internal_lookup = kcalloc(ioc->internal_depth,
2794             sizeof(struct request_tracker), GFP_KERNEL);
2795         if (!ioc->internal_lookup) {
2796                 pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
2797                     ioc->name);
2798                 goto out;
2799         }
2800         ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2801         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2802                 "internal(0x%p): depth(%d), start smid(%d)\n",
2803                 ioc->name, ioc->internal,
2804             ioc->internal_depth, ioc->internal_smid));
2805
2806         /* sense buffers, 4 byte align */
2807         sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2808         ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2809             0);
2810         if (!ioc->sense_dma_pool) {
2811                 pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
2812                     ioc->name);
2813                 goto out;
2814         }
2815         ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2816             &ioc->sense_dma);
2817         if (!ioc->sense) {
2818                 pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
2819                     ioc->name);
2820                 goto out;
2821         }
2822         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2823             "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2824             "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2825             SCSI_SENSE_BUFFERSIZE, sz/1024));
2826         dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
2827             ioc->name, (unsigned long long)ioc->sense_dma));
2828         total_sz += sz;
2829
2830         /* reply pool, 4 byte align */
2831         sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2832         ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2833             0);
2834         if (!ioc->reply_dma_pool) {
2835                 pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
2836                     ioc->name);
2837                 goto out;
2838         }
2839         ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2840             &ioc->reply_dma);
2841         if (!ioc->reply) {
2842                 pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
2843                     ioc->name);
2844                 goto out;
2845         }
2846         ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2847         ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2848         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2849                 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
2850                 ioc->name, ioc->reply,
2851             ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2852         dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
2853             ioc->name, (unsigned long long)ioc->reply_dma));
2854         total_sz += sz;
2855
2856         /* reply free queue, 16 byte align */
2857         sz = ioc->reply_free_queue_depth * 4;
2858         ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2859             ioc->pdev, sz, 16, 0);
2860         if (!ioc->reply_free_dma_pool) {
2861                 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
2862                         ioc->name);
2863                 goto out;
2864         }
2865         ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
2866             &ioc->reply_free_dma);
2867         if (!ioc->reply_free) {
2868                 pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
2869                         ioc->name);
2870                 goto out;
2871         }
2872         memset(ioc->reply_free, 0, sz);
2873         dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
2874             "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2875             ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2876         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2877                 "reply_free_dma (0x%llx)\n",
2878                 ioc->name, (unsigned long long)ioc->reply_free_dma));
2879         total_sz += sz;
2880
2881         /* reply post queue, 16 byte align */
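        /* When MSI-X is enabled, one reply post queue per reply queue is
         * carved out of this single contiguous allocation; see
         * _base_make_ioc_operational(), which walks ioc->reply_queue_list
         * and hands each queue its slice.
         */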
2882         reply_post_free_sz = ioc->reply_post_queue_depth *
2883             sizeof(Mpi2DefaultReplyDescriptor_t);
2884         if (_base_is_controller_msix_enabled(ioc))
2885                 sz = reply_post_free_sz * ioc->reply_queue_count;
2886         else
2887                 sz = reply_post_free_sz;
2888         ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2889             ioc->pdev, sz, 16, 0);
2890         if (!ioc->reply_post_free_dma_pool) {
2891                 pr_err(MPT3SAS_FMT
2892                         "reply_post_free pool: pci_pool_create failed\n",
2893                         ioc->name);
2894                 goto out;
2895         }
2896         ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool,
2897             GFP_KERNEL, &ioc->reply_post_free_dma);
2898         if (!ioc->reply_post_free) {
2899                 pr_err(MPT3SAS_FMT
2900                         "reply_post_free pool: pci_pool_alloc failed\n",
2901                         ioc->name);
2902                 goto out;
2903         }
2904         memset(ioc->reply_post_free, 0, sz);
2905         dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
2906             "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2907             ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2908             sz/1024));
2909         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2910                 "reply_post_free_dma = (0x%llx)\n",
2911                 ioc->name, (unsigned long long)
2912             ioc->reply_post_free_dma));
2913         total_sz += sz;
2914
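        /* single DMA-coherent scratch buffer reused for all configuration
         * page requests (serialized by the config_cmds mutex in
         * mpt3sas_config.c)
         */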
2915         ioc->config_page_sz = 512;
2916         ioc->config_page = pci_alloc_consistent(ioc->pdev,
2917             ioc->config_page_sz, &ioc->config_page_dma);
2918         if (!ioc->config_page) {
2919                 pr_err(MPT3SAS_FMT
2920                         "config page: pci_alloc_consistent failed\n",
2921                         ioc->name);
2922                 goto out;
2923         }
2924         dinitprintk(ioc, pr_info(MPT3SAS_FMT
2925                 "config page(0x%p): size(%d)\n",
2926                 ioc->name, ioc->config_page, ioc->config_page_sz));
2927         dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
2928                 ioc->name, (unsigned long long)ioc->config_page_dma));
2929         total_sz += ioc->config_page_sz;
2930
2931         pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
2932             ioc->name, total_sz/1024);
2933         pr_info(MPT3SAS_FMT
2934                 "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
2935             ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2936         pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
2937             ioc->name, ioc->shost->sg_tablesize);
2938         return 0;
2939
2940  out:
2941         return -ENOMEM;
2942 }
2943
2944 /**
2945  * mpt3sas_base_get_iocstate - Get the current state of an MPT adapter.
2946  * @ioc: Pointer to MPT3SAS_ADAPTER structure
2947  * @cooked: Request raw or cooked IOC state
2948  *
2949  * Returns all IOC Doorbell register bits if cooked==0, else just the
2950  * Doorbell bits in MPI2_IOC_STATE_MASK.
2951  */
2952 u32
2953 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
2954 {
2955         u32 s, sc;
2956
2957         s = readl(&ioc->chip->Doorbell);
2958         sc = s & MPI2_IOC_STATE_MASK;
2959         return cooked ? sc : s;
2960 }
2961
2962 /**
2963  * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
2964  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2965  * @timeout: timeout in seconds
2966  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2967  *
2968  * Returns 0 for success, non-zero for failure.
2969  */
2970 static int
2971 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2972         int sleep_flag)
2973 {
2974         u32 count, cntdn;
2975         u32 current_state;
2976
2977         count = 0;
2978         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
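        /* cntdn approximates @timeout in loop iterations: the CAN_SLEEP path
         * sleeps ~1 ms per pass (1000 passes per second) while the NO_SLEEP
         * path busy-waits 500 us per pass (2000 passes per second).
         */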
2979         do {
2980                 current_state = mpt3sas_base_get_iocstate(ioc, 1);
2981                 if (current_state == ioc_state)
2982                         return 0;
2983                 if (count && current_state == MPI2_IOC_STATE_FAULT)
2984                         break;
2985                 if (sleep_flag == CAN_SLEEP)
2986                         usleep_range(1000, 1500);
2987                 else
2988                         udelay(500);
2989                 count++;
2990         } while (--cntdn);
2991
2992         return current_state;
2993 }
2994
2995 /**
2996  * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
2997  * a write to the doorbell)
2998  * @ioc: per adapter object
2999  * @timeout: timeout in seconds
3000  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3001  *
3002  * Returns 0 for success, non-zero for failure.
3003  *
3004  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3005  */
3006 static int
3007 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3008         int sleep_flag)
3009 {
3010         u32 cntdn, count;
3011         u32 int_status;
3012
3013         count = 0;
3014         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3015         do {
3016                 int_status = readl(&ioc->chip->HostInterruptStatus);
3017                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3018                         dhsprintk(ioc, pr_info(MPT3SAS_FMT
3019                                 "%s: successful count(%d), timeout(%d)\n",
3020                                 ioc->name, __func__, count, timeout));
3021                         return 0;
3022                 }
3023                 if (sleep_flag == CAN_SLEEP)
3024                         usleep_range(1000, 1500);
3025                 else
3026                         udelay(500);
3027                 count++;
3028         } while (--cntdn);
3029
3030         pr_err(MPT3SAS_FMT
3031                 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3032                 ioc->name, __func__, count, int_status);
3033         return -EFAULT;
3034 }
3035
3036 /**
3037  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3038  * @ioc: per adapter object
3039  * @timeout: timeout in seconds
3040  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3041  *
3042  * Returns 0 for success, non-zero for failure.
3043  *
3044  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3045  * doorbell.
3046  */
3047 static int
3048 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3049         int sleep_flag)
3050 {
3051         u32 cntdn, count;
3052         u32 int_status;
3053         u32 doorbell;
3054
3055         count = 0;
3056         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3057         do {
3058                 int_status = readl(&ioc->chip->HostInterruptStatus);
3059                 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3060                         dhsprintk(ioc, pr_info(MPT3SAS_FMT
3061                                 "%s: successful count(%d), timeout(%d)\n",
3062                                 ioc->name, __func__, count, timeout));
3063                         return 0;
3064                 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3065                         doorbell = readl(&ioc->chip->Doorbell);
3066                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
3067                             MPI2_IOC_STATE_FAULT) {
3068                                 mpt3sas_base_fault_info(ioc, doorbell);
3069                                 return -EFAULT;
3070                         }
3071                 } else if (int_status == 0xFFFFFFFF)
3072                         goto out;
3073
3074                 if (sleep_flag == CAN_SLEEP)
3075                         usleep_range(1000, 1500);
3076                 else
3077                         udelay(500);
3078                 count++;
3079         } while (--cntdn);
3080
3081  out:
3082         pr_err(MPT3SAS_FMT
3083          "%s: failed due to timeout count(%d), int_status(%x)!\n",
3084          ioc->name, __func__, count, int_status);
3085         return -EFAULT;
3086 }
3087
3088 /**
3089  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3090  * @ioc: per adapter object
3091  * @timeout: timeout in seconds
3092  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3093  *
3094  * Returns 0 for success, non-zero for failure.
3095  *
3096  */
3097 static int
3098 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3099         int sleep_flag)
3100 {
3101         u32 cntdn, count;
3102         u32 doorbell_reg;
3103
3104         count = 0;
3105         cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3106         do {
3107                 doorbell_reg = readl(&ioc->chip->Doorbell);
3108                 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3109                         dhsprintk(ioc, pr_info(MPT3SAS_FMT
3110                                 "%s: successful count(%d), timeout(%d)\n",
3111                                 ioc->name, __func__, count, timeout));
3112                         return 0;
3113                 }
3114                 if (sleep_flag == CAN_SLEEP)
3115                         usleep_range(1000, 1500);
3116                 else
3117                         udelay(500);
3118                 count++;
3119         } while (--cntdn);
3120
3121         pr_err(MPT3SAS_FMT
3122                 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3123                 ioc->name, __func__, count, doorbell_reg);
3124         return -EFAULT;
3125 }
3126
3127 /**
3128  * _base_send_ioc_reset - send doorbell reset
3129  * @ioc: per adapter object
3130  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3131  * @timeout: timeout in seconds
3132  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3133  *
3134  * Returns 0 for success, non-zero for failure.
3135  */
3136 static int
3137 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3138         int sleep_flag)
3139 {
3140         u32 ioc_state;
3141         int r = 0;
3142
3143         if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3144                 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3145                     ioc->name, __func__);
3146                 return -EFAULT;
3147         }
3148
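        /* A message unit reset is only attempted when the IOC advertises the
         * event-replay capability; otherwise -EFAULT is returned and callers
         * such as _base_make_ioc_ready() fall back to a diag reset.
         */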
3149         if (!(ioc->facts.IOCCapabilities &
3150            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3151                 return -EFAULT;
3152
3153         pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3154
3155         writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3156             &ioc->chip->Doorbell);
3157         if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3158                 r = -EFAULT;
3159                 goto out;
3160         }
3161         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3162             timeout, sleep_flag);
3163         if (ioc_state) {
3164                 pr_err(MPT3SAS_FMT
3165                         "%s: failed going to ready state (ioc_state=0x%x)\n",
3166                         ioc->name, __func__, ioc_state);
3167                 r = -EFAULT;
3168                 goto out;
3169         }
3170  out:
3171         pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3172             ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3173         return r;
3174 }
3175
3176 /**
3177  * _base_handshake_req_reply_wait - send request thru doorbell interface
3178  * @ioc: per adapter object
3179  * @request_bytes: request length
3180  * @request: pointer having request payload
3181  * @reply_bytes: reply length
3182  * @reply: pointer to reply payload
3183  * @timeout: timeout in seconds
3184  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3185  *
3186  * Returns 0 for success, non-zero for failure.
3187  */
3188 static int
3189 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3190         u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3191 {
3192         MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3193         int i;
3194         u8 failed;
3195         u16 dummy;
3196         __le32 *mfp;
3197
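        /* Doorbell handshake: the request is pushed one dword at a time
         * through the Doorbell register, with the IOC acking each dword; the
         * IOC then returns the reply 16 bits at a time, each chunk signalled
         * by a doorbell interrupt that the host must clear.
         */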
3198         /* make sure doorbell is not in use */
3199         if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3200                 pr_err(MPT3SAS_FMT
3201                         "doorbell is in use (line=%d)\n",
3202                         ioc->name, __LINE__);
3203                 return -EFAULT;
3204         }
3205
3206         /* clear pending doorbell interrupts from previous state changes */
3207         if (readl(&ioc->chip->HostInterruptStatus) &
3208             MPI2_HIS_IOC2SYS_DB_STATUS)
3209                 writel(0, &ioc->chip->HostInterruptStatus);
3210
3211         /* send message to ioc */
3212         writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3213             ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3214             &ioc->chip->Doorbell);
3215
3216         if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3217                 pr_err(MPT3SAS_FMT
3218                         "doorbell handshake int failed (line=%d)\n",
3219                         ioc->name, __LINE__);
3220                 return -EFAULT;
3221         }
3222         writel(0, &ioc->chip->HostInterruptStatus);
3223
3224         if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3225                 pr_err(MPT3SAS_FMT
3226                         "doorbell handshake ack failed (line=%d)\n",
3227                         ioc->name, __LINE__);
3228                 return -EFAULT;
3229         }
3230
3231         /* send message 32-bits at a time */
3232         for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3233                 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3234                 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3235                         failed = 1;
3236         }
3237
3238         if (failed) {
3239                 pr_err(MPT3SAS_FMT
3240                         "doorbell handshake sending request failed (line=%d)\n",
3241                         ioc->name, __LINE__);
3242                 return -EFAULT;
3243         }
3244
3245         /* now wait for the reply */
3246         if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3247                 pr_err(MPT3SAS_FMT
3248                         "doorbell handshake int failed (line=%d)\n",
3249                         ioc->name, __LINE__);
3250                 return -EFAULT;
3251         }
3252
3253         /* read the first two 16-bit words; the second carries MsgLength, the reply length in dwords */
3254         reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3255             & MPI2_DOORBELL_DATA_MASK);
3256         writel(0, &ioc->chip->HostInterruptStatus);
3257         if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3258                 pr_err(MPT3SAS_FMT
3259                         "doorbell handshake int failed (line=%d)\n",
3260                         ioc->name, __LINE__);
3261                 return -EFAULT;
3262         }
3263         reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3264             & MPI2_DOORBELL_DATA_MASK);
3265         writel(0, &ioc->chip->HostInterruptStatus);
3266
3267         for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3268                 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3269                         pr_err(MPT3SAS_FMT
3270                                 "doorbell handshake int failed (line=%d)\n",
3271                                 ioc->name, __LINE__);
3272                         return -EFAULT;
3273                 }
3274                 if (i >= reply_bytes/2) /* overflow case */
3275                         dummy = readl(&ioc->chip->Doorbell);
3276                 else
3277                         reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3278                             & MPI2_DOORBELL_DATA_MASK);
3279                 writel(0, &ioc->chip->HostInterruptStatus);
3280         }
3281
3282         _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3283         if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3284                 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3285                         "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3286         }
3287         writel(0, &ioc->chip->HostInterruptStatus);
3288
3289         if (ioc->logging_level & MPT_DEBUG_INIT) {
3290                 mfp = (__le32 *)reply;
3291                 pr_info("\toffset:data\n");
3292                 for (i = 0; i < reply_bytes/4; i++)
3293                         pr_info("\t[0x%02x]:%08x\n", i*4,
3294                             le32_to_cpu(mfp[i]));
3295         }
3296         return 0;
3297 }
3298
3299 /**
3300  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3301  * @ioc: per adapter object
3302  * @mpi_reply: the reply payload from FW
3303  * @mpi_request: the request payload sent to FW
3304  *
3305  * The SAS IO Unit Control Request message allows the host to perform low-level
3306  * operations, such as resets on the PHYs of the IO Unit, also allows the host
3307  * to obtain the IOC assigned device handles for a device if it has other
3308  * identifying information about the device, in addition allows the host to
3309  * remove IOC resources associated with the device.
3310  *
3311  * Returns 0 for success, non-zero for failure.
3312  */
3313 int
3314 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
3315         Mpi2SasIoUnitControlReply_t *mpi_reply,
3316         Mpi2SasIoUnitControlRequest_t *mpi_request)
3317 {
3318         u16 smid;
3319         u32 ioc_state;
3320         unsigned long timeleft;
3321         u8 issue_reset = 0;
3322         int rc;
3323         void *request;
3324         u16 wait_state_count;
3325
3326         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3327             __func__));
3328
3329         mutex_lock(&ioc->base_cmds.mutex);
3330
3331         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3332                 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3333                     ioc->name, __func__);
3334                 rc = -EAGAIN;
3335                 goto out;
3336         }
3337
3338         wait_state_count = 0;
3339         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3340         while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3341                 if (wait_state_count++ == 10) {
3342                         pr_err(MPT3SAS_FMT
3343                             "%s: failed due to ioc not operational\n",
3344                             ioc->name, __func__);
3345                         rc = -EFAULT;
3346                         goto out;
3347                 }
3348                 ssleep(1);
3349                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3350                 pr_info(MPT3SAS_FMT
3351                         "%s: waiting for operational state(count=%d)\n",
3352                         ioc->name, __func__, wait_state_count);
3353         }
3354
3355         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3356         if (!smid) {
3357                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3358                     ioc->name, __func__);
3359                 rc = -EAGAIN;
3360                 goto out;
3361         }
3362
3363         rc = 0;
3364         ioc->base_cmds.status = MPT3_CMD_PENDING;
3365         request = mpt3sas_base_get_msg_frame(ioc, smid);
3366         ioc->base_cmds.smid = smid;
3367         memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3368         if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3369             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3370                 ioc->ioc_link_reset_in_progress = 1;
3371         init_completion(&ioc->base_cmds.done);
3372         mpt3sas_base_put_smid_default(ioc, smid);
3373         timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3374             msecs_to_jiffies(10000));
3375         if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3376             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3377             ioc->ioc_link_reset_in_progress)
3378                 ioc->ioc_link_reset_in_progress = 0;
3379         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3380                 pr_err(MPT3SAS_FMT "%s: timeout\n",
3381                     ioc->name, __func__);
3382                 _debug_dump_mf(mpi_request,
3383                     sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3384                 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3385                         issue_reset = 1;
3386                 goto issue_host_reset;
3387         }
3388         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3389                 memcpy(mpi_reply, ioc->base_cmds.reply,
3390                     sizeof(Mpi2SasIoUnitControlReply_t));
3391         else
3392                 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3393         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3394         goto out;
3395
3396  issue_host_reset:
3397         if (issue_reset)
3398                 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3399                     FORCE_BIG_HAMMER);
3400         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3401         rc = -EFAULT;
3402  out:
3403         mutex_unlock(&ioc->base_cmds.mutex);
3404         return rc;
3405 }
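/*
 * Illustrative sketch (not part of this file): a caller such as the PHY
 * reset path in mpt3sas_transport.c builds the request roughly as below;
 * "phy_number" and "hard_reset" are placeholders supplied by the caller.
 *
 *	Mpi2SasIoUnitControlRequest_t mpi_request;
 *	Mpi2SasIoUnitControlReply_t mpi_reply;
 *
 *	memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
 *	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 *	mpi_request.Operation = hard_reset ? MPI2_SAS_OP_PHY_HARD_RESET :
 *	    MPI2_SAS_OP_PHY_LINK_RESET;
 *	mpi_request.PhyNum = phy_number;
 *	if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request)))
 *		return -ENXIO;
 */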
3406
3407 /**
3408  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
3409  * @ioc: per adapter object
3410  * @mpi_reply: the reply payload from FW
3411  * @mpi_request: the request payload sent to FW
3412  *
3413  * The SCSI Enclosure Processor request message causes the IOC to
3414  * communicate with SES devices to control LED status signals.
3415  *
3416  * Returns 0 for success, non-zero for failure.
3417  */
3418 int
3419 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
3420         Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3421 {
3422         u16 smid;
3423         u32 ioc_state;
3424         unsigned long timeleft;
3425         u8 issue_reset = 0;
3426         int rc;
3427         void *request;
3428         u16 wait_state_count;
3429
3430         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3431             __func__));
3432
3433         mutex_lock(&ioc->base_cmds.mutex);
3434
3435         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3436                 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3437                     ioc->name, __func__);
3438                 rc = -EAGAIN;
3439                 goto out;
3440         }
3441
3442         wait_state_count = 0;
3443         ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3444         while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3445                 if (wait_state_count++ == 10) {
3446                         pr_err(MPT3SAS_FMT
3447                             "%s: failed due to ioc not operational\n",
3448                             ioc->name, __func__);
3449                         rc = -EFAULT;
3450                         goto out;
3451                 }
3452                 ssleep(1);
3453                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3454                 pr_info(MPT3SAS_FMT
3455                         "%s: waiting for operational state(count=%d)\n",
3456                         ioc->name,
3457                     __func__, wait_state_count);
3458         }
3459
3460         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3461         if (!smid) {
3462                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3463                     ioc->name, __func__);
3464                 rc = -EAGAIN;
3465                 goto out;
3466         }
3467
3468         rc = 0;
3469         ioc->base_cmds.status = MPT3_CMD_PENDING;
3470         request = mpt3sas_base_get_msg_frame(ioc, smid);
3471         ioc->base_cmds.smid = smid;
3472         memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
3473         init_completion(&ioc->base_cmds.done);
3474         mpt3sas_base_put_smid_default(ioc, smid);
3475         timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3476             msecs_to_jiffies(10000));
3477         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3478                 pr_err(MPT3SAS_FMT "%s: timeout\n",
3479                     ioc->name, __func__);
3480                 _debug_dump_mf(mpi_request,
3481                     sizeof(Mpi2SepRequest_t)/4);
3482                 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3483                         issue_reset = 1;
3484                 goto issue_host_reset;
3485         }
3486         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3487                 memcpy(mpi_reply, ioc->base_cmds.reply,
3488                     sizeof(Mpi2SepReply_t));
3489         else
3490                 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3491         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3492         goto out;
3493
3494  issue_host_reset:
3495         if (issue_reset)
3496                 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3497                     FORCE_BIG_HAMMER);
3498         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3499         rc = -EFAULT;
3500  out:
3501         mutex_unlock(&ioc->base_cmds.mutex);
3502         return rc;
3503 }
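/*
 * Illustrative sketch (not part of this file): lighting a predicted-fault
 * LED for a device might look roughly like this; "handle" is a placeholder
 * device handle supplied by the caller.
 *
 *	Mpi2SepRequest_t mpi_request;
 *	Mpi2SepReply_t mpi_reply;
 *
 *	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
 *	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
 *	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
 *	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
 *	mpi_request.SlotStatus =
 *	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
 *	mpi_request.DevHandle = cpu_to_le16(handle);
 *	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
 *	    &mpi_request)))
 *		return;
 */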
3504
3505 /**
3506  * _base_get_port_facts - obtain port facts reply and save in ioc
3507  * @ioc: per adapter object
 * @port: port number
3508  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3509  *
3510  * Returns 0 for success, non-zero for failure.
3511  */
3512 static int
3513 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
3514 {
3515         Mpi2PortFactsRequest_t mpi_request;
3516         Mpi2PortFactsReply_t mpi_reply;
3517         struct mpt3sas_port_facts *pfacts;
3518         int mpi_reply_sz, mpi_request_sz, r;
3519
3520         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3521             __func__));
3522
3523         mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3524         mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3525         memset(&mpi_request, 0, mpi_request_sz);
3526         mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3527         mpi_request.PortNumber = port;
3528         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3529             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3530
3531         if (r != 0) {
3532                 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3533                     ioc->name, __func__, r);
3534                 return r;
3535         }
3536
3537         pfacts = &ioc->pfacts[port];
3538         memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
3539         pfacts->PortNumber = mpi_reply.PortNumber;
3540         pfacts->VP_ID = mpi_reply.VP_ID;
3541         pfacts->VF_ID = mpi_reply.VF_ID;
3542         pfacts->MaxPostedCmdBuffers =
3543             le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3544
3545         return 0;
3546 }
3547
3548 /**
3549  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3550  * @ioc: per adapter object
3551  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3552  *
3553  * Returns 0 for success, non-zero for failure.
3554  */
3555 static int
3556 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3557 {
3558         Mpi2IOCFactsRequest_t mpi_request;
3559         Mpi2IOCFactsReply_t mpi_reply;
3560         struct mpt3sas_facts *facts;
3561         int mpi_reply_sz, mpi_request_sz, r;
3562
3563         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3564             __func__));
3565
3566         mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3567         mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3568         memset(&mpi_request, 0, mpi_request_sz);
3569         mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3570         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3571             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3572
3573         if (r != 0) {
3574                 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3575                     ioc->name, __func__, r);
3576                 return r;
3577         }
3578
3579         facts = &ioc->facts;
3580         memset(facts, 0, sizeof(struct mpt3sas_facts));
3581         facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3582         facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3583         facts->VP_ID = mpi_reply.VP_ID;
3584         facts->VF_ID = mpi_reply.VF_ID;
3585         facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3586         facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3587         facts->WhoInit = mpi_reply.WhoInit;
3588         facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3589         facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3590         facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3591         facts->MaxReplyDescriptorPostQueueDepth =
3592             le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3593         facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3594         facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3595         if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3596                 ioc->ir_firmware = 1;
3597         facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3598         facts->IOCRequestFrameSize =
3599             le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3600         facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3601         facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3602         ioc->shost->max_id = -1;
3603         facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3604         facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3605         facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3606         facts->HighPriorityCredit =
3607             le16_to_cpu(mpi_reply.HighPriorityCredit);
3608         facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3609         facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3610
3611         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3612                 "hba queue depth(%d), max chains per io(%d)\n",
3613                 ioc->name, facts->RequestCredit,
3614             facts->MaxChainDepth));
3615         dinitprintk(ioc, pr_info(MPT3SAS_FMT
3616                 "request frame size(%d), reply frame size(%d)\n", ioc->name,
3617             facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3618         return 0;
3619 }
3620
3621 /**
3622  * _base_send_ioc_init - send ioc_init to firmware
3623  * @ioc: per adapter object
3624  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3625  *
3626  * Returns 0 for success, non-zero for failure.
3627  */
3628 static int
3629 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3630 {
3631         Mpi2IOCInitRequest_t mpi_request;
3632         Mpi2IOCInitReply_t mpi_reply;
3633         int r;
3634         struct timeval current_time;
3635         u16 ioc_status;
3636
3637         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3638             __func__));
3639
3640         memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3641         mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3642         mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3643         mpi_request.VF_ID = 0; /* TODO */
3644         mpi_request.VP_ID = 0;
3645         mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3646         mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3647
3648         if (_base_is_controller_msix_enabled(ioc))
3649                 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3650         mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3651         mpi_request.ReplyDescriptorPostQueueDepth =
3652             cpu_to_le16(ioc->reply_post_queue_depth);
3653         mpi_request.ReplyFreeQueueDepth =
3654             cpu_to_le16(ioc->reply_free_queue_depth);
3655
3656         mpi_request.SenseBufferAddressHigh =
3657             cpu_to_le32((u64)ioc->sense_dma >> 32);
3658         mpi_request.SystemReplyAddressHigh =
3659             cpu_to_le32((u64)ioc->reply_dma >> 32);
3660         mpi_request.SystemRequestFrameBaseAddress =
3661             cpu_to_le64((u64)ioc->request_dma);
3662         mpi_request.ReplyFreeQueueAddress =
3663             cpu_to_le64((u64)ioc->reply_free_dma);
3664         mpi_request.ReplyDescriptorPostQueueAddress =
3665             cpu_to_le64((u64)ioc->reply_post_free_dma);
3666
3667
3668         /* This time stamp specifies number of milliseconds
3669          * since epoch ~ midnight January 1, 1970.
3670          */
3671         do_gettimeofday(&current_time);
3672         mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3673             (current_time.tv_usec / 1000));
3674
3675         if (ioc->logging_level & MPT_DEBUG_INIT) {
3676                 __le32 *mfp;
3677                 int i;
3678
3679                 mfp = (__le32 *)&mpi_request;
3680                 pr_info("\toffset:data\n");
3681                 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3682                         pr_info("\t[0x%02x]:%08x\n", i*4,
3683                             le32_to_cpu(mfp[i]));
3684         }
3685
3686         r = _base_handshake_req_reply_wait(ioc,
3687             sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3688             sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3689             sleep_flag);
3690
3691         if (r != 0) {
3692                 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3693                     ioc->name, __func__, r);
3694                 return r;
3695         }
3696
3697         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3698         if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3699             mpi_reply.IOCLogInfo) {
3700                 pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
3701                 r = -EIO;
3702         }
3703
3704         return r;
3705 }
3706
3707 /**
3708  * mpt3sas_port_enable_done - command completion routine for port enable
3709  * @ioc: per adapter object
3710  * @smid: system request message index
3711  * @msix_index: MSIX table index supplied by the OS
3712  * @reply: reply message frame(lower 32bit addr)
3713  *
3714  * Return 1 meaning mf should be freed from _base_interrupt
3715  *        0 means the mf is freed from this function.
3716  */
3717 u8
3718 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3719         u32 reply)
3720 {
3721         MPI2DefaultReply_t *mpi_reply;
3722         u16 ioc_status;
3723
3724         if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
3725                 return 1;
3726
3727         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
3728         if (!mpi_reply)
3729                 return 1;
3730
3731         if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
3732                 return 1;
3733
3734         ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
3735         ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
3736         ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
3737         memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
3738         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3739         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3740                 ioc->port_enable_failed = 1;
3741
3742         if (ioc->is_driver_loading) {
3743                 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3744                         mpt3sas_port_enable_complete(ioc);
3745                         return 1;
3746                 } else {
3747                         ioc->start_scan_failed = ioc_status;
3748                         ioc->start_scan = 0;
3749                         return 1;
3750                 }
3751         }
3752         complete(&ioc->port_enable_cmds.done);
3753         return 1;
3754 }
3755
3756 /**
3757  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3758  * @ioc: per adapter object
3759  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3760  *
3761  * Returns 0 for success, non-zero for failure.
3762  */
3763 static int
3764 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3765 {
3766         Mpi2PortEnableRequest_t *mpi_request;
3767         Mpi2PortEnableReply_t *mpi_reply;
3768         unsigned long timeleft;
3769         int r = 0;
3770         u16 smid;
3771         u16 ioc_status;
3772
3773         pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
3774
3775         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
3776                 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3777                     ioc->name, __func__);
3778                 return -EAGAIN;
3779         }
3780
3781         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3782         if (!smid) {
3783                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3784                     ioc->name, __func__);
3785                 return -EAGAIN;
3786         }
3787
3788         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
3789         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3790         ioc->port_enable_cmds.smid = smid;
3791         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3792         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3793
3794         init_completion(&ioc->port_enable_cmds.done);
3795         mpt3sas_base_put_smid_default(ioc, smid);
3796         timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3797             300*HZ);
3798         if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
3799                 pr_err(MPT3SAS_FMT "%s: timeout\n",
3800                     ioc->name, __func__);
3801                 _debug_dump_mf(mpi_request,
3802                     sizeof(Mpi2PortEnableRequest_t)/4);
3803                 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
3804                         r = -EFAULT;
3805                 else
3806                         r = -ETIME;
3807                 goto out;
3808         }
3809
3810         mpi_reply = ioc->port_enable_cmds.reply;
3811         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3812         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3813                 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
3814                     ioc->name, __func__, ioc_status);
3815                 r = -EFAULT;
3816                 goto out;
3817         }
3818
3819  out:
3820         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
3821         pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3822             "SUCCESS" : "FAILED"));
3823         return r;
3824 }
3825
3826 /**
3827  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
3828  * @ioc: per adapter object
3829  *
3830  * Returns 0 for success, non-zero for failure.
3831  */
3832 int
3833 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
3834 {
3835         Mpi2PortEnableRequest_t *mpi_request;
3836         u16 smid;
3837
3838         pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
3839
3840         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
3841                 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3842                     ioc->name, __func__);
3843                 return -EAGAIN;
3844         }
3845
3846         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3847         if (!smid) {
3848                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3849                     ioc->name, __func__);
3850                 return -EAGAIN;
3851         }
3852
3853         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
3854         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3855         ioc->port_enable_cmds.smid = smid;
3856         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3857         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3858
3859         mpt3sas_base_put_smid_default(ioc, smid);
3860         return 0;
3861 }
3862
3863 /**
3864  * _base_determine_wait_on_discovery - decide whether to wait for discovery
3865  * @ioc: per adapter object
3866  *
3867  * Decide whether to wait on discovery to complete. Used to either
3868  * locate boot device, or report volumes ahead of physical devices.
3869  *
3870  * Returns 1 for wait, 0 for don't wait
3871  */
3872 static int
3873 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
3874 {
3875         /* We wait for discovery to complete if IR firmware is loaded.
3876          * The sas topology events arrive before PD events, so we need time to
3877          * turn on the bit in ioc->pd_handles to indicate a PD.
3878          * Also, it may be required to report volumes ahead of physical
3879          * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3880          */
3881         if (ioc->ir_firmware)
3882                 return 1;
3883
3884         /* if no Bios, then we don't need to wait */
3885         if (!ioc->bios_pg3.BiosVersion)
3886                 return 0;
3887
3888         /* The BIOS is present, so we drop down here.
3889          *
3890          * If there are any entries in BIOS Page 2, then we wait
3891          * for discovery to complete.
3892          */
3893
3894         /* Current Boot Device */
3895         if ((ioc->bios_pg2.CurrentBootDeviceForm &
3896             MPI2_BIOSPAGE2_FORM_MASK) ==
3897             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3898         /* Request Boot Device */
3899            (ioc->bios_pg2.ReqBootDeviceForm &
3900             MPI2_BIOSPAGE2_FORM_MASK) ==
3901             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3902         /* Alternate Request Boot Device */
3903            (ioc->bios_pg2.ReqAltBootDeviceForm &
3904             MPI2_BIOSPAGE2_FORM_MASK) ==
3905             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3906                 return 0;
3907
3908         return 1;
3909 }
3910
3911 /**
3912  * _base_unmask_events - turn on notification for this event
3913  * @ioc: per adapter object
3914  * @event: firmware event
3915  *
3916  * The mask is stored in ioc->event_masks.
3917  */
3918 static void
3919 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
3920 {
3921         u32 desired_event;
3922
3923         if (event >= 128)
3924                 return;
3925
3926         desired_event = (1 << (event % 32));
3927
3928         if (event < 32)
3929                 ioc->event_masks[0] &= ~desired_event;
3930         else if (event < 64)
3931                 ioc->event_masks[1] &= ~desired_event;
3932         else if (event < 96)
3933                 ioc->event_masks[2] &= ~desired_event;
3934         else if (event < 128)
3935                 ioc->event_masks[3] &= ~desired_event;
3936 }
3937
3938 /**
3939  * _base_event_notification - send event notification
3940  * @ioc: per adapter object
3941  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3942  *
3943  * Returns 0 for success, non-zero for failure.
3944  */
3945 static int
3946 _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3947 {
3948         Mpi2EventNotificationRequest_t *mpi_request;
3949         unsigned long timeleft;
3950         u16 smid;
3951         int r = 0;
3952         int i;
3953
3954         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3955             __func__));
3956
3957         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3958                 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3959                     ioc->name, __func__);
3960                 return -EAGAIN;
3961         }
3962
3963         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3964         if (!smid) {
3965                 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3966                     ioc->name, __func__);
3967                 return -EAGAIN;
3968         }
3969         ioc->base_cmds.status = MPT3_CMD_PENDING;
3970         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3971         ioc->base_cmds.smid = smid;
3972         memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3973         mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3974         mpi_request->VF_ID = 0; /* TODO */
3975         mpi_request->VP_ID = 0;
3976         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3977                 mpi_request->EventMasks[i] =
3978                     cpu_to_le32(ioc->event_masks[i]);
3979         init_completion(&ioc->base_cmds.done);
3980         mpt3sas_base_put_smid_default(ioc, smid);
3981         timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3982         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3983                 pr_err(MPT3SAS_FMT "%s: timeout\n",
3984                     ioc->name, __func__);
3985                 _debug_dump_mf(mpi_request,
3986                     sizeof(Mpi2EventNotificationRequest_t)/4);
3987                 if (ioc->base_cmds.status & MPT3_CMD_RESET)
3988                         r = -EFAULT;
3989                 else
3990                         r = -ETIME;
3991         } else
3992                 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
3993                     ioc->name, __func__));
3994         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3995         return r;
3996 }
3997
3998 /**
3999  * mpt3sas_base_validate_event_type - validating event types
4000  * @ioc: per adapter object
4001  * @event: firmware event
4002  *
4003  * This will turn on firmware event notification when an application
4004  * asks for that event. We don't mask events that are already enabled.
4005  */
4006 void
4007 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4008 {
4009         int i, j;
4010         u32 event_mask, desired_event;
4011         u8 send_update_to_fw;
4012
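        /* A set bit in @event_type means the application wants that event.
         * If such an event is currently masked in ioc->event_masks[], clear
         * the mask bit and remember to push the updated masks to firmware.
         */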
4013         for (i = 0, send_update_to_fw = 0; i <
4014             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4015                 event_mask = ~event_type[i];
4016                 desired_event = 1;
4017                 for (j = 0; j < 32; j++) {
4018                         if (!(event_mask & desired_event) &&
4019                             (ioc->event_masks[i] & desired_event)) {
4020                                 ioc->event_masks[i] &= ~desired_event;
4021                                 send_update_to_fw = 1;
4022                         }
4023                         desired_event = (desired_event << 1);
4024                 }
4025         }
4026
4027         if (!send_update_to_fw)
4028                 return;
4029
4030         mutex_lock(&ioc->base_cmds.mutex);
4031         _base_event_notification(ioc, CAN_SLEEP);
4032         mutex_unlock(&ioc->base_cmds.mutex);
4033 }
4034
4035 /**
4036  * _base_diag_reset - the "big hammer" start of day reset
4037  * @ioc: per adapter object
4038  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4039  *
4040  * Returns 0 for success, non-zero for failure.
4041  */
4042 static int
4043 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4044 {
4045         u32 host_diagnostic;
4046         u32 ioc_state;
4047         u32 count;
4048         u32 hcb_size;
4049
4050         pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4051
4052         drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4053             ioc->name));
4054
4055         count = 0;
4056         do {
4057                 /* Write magic sequence to WriteSequence register
4058                  * Loop until in diagnostic mode
4059                  */
4060                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4061                         "write magic sequence\n", ioc->name));
4062                 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4063                 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4064                 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4065                 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4066                 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4067                 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4068                 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4069
4070                 /* wait 100 msec */
4071                 if (sleep_flag == CAN_SLEEP)
4072                         msleep(100);
4073                 else
4074                         mdelay(100);
4075
4076                 if (count++ > 20)
4077                         goto out;
4078
4079                 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4080                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4081                         "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4082                     ioc->name, count, host_diagnostic));
4083
4084         } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4085
4086         hcb_size = readl(&ioc->chip->HCBSize);
4087
4088         drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4089             ioc->name));
4090         writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4091              &ioc->chip->HostDiagnostic);
4092
4093         /* don't access any registers for 50 milliseconds */
4094         msleep(50);
4095
4096         /* 300 second max wait */
4097         for (count = 0; count < 3000000 ; count++) {
4098
4099                 host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4100
4101                 if (host_diagnostic == 0xFFFFFFFF)
4102                         goto out;
4103                 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4104                         break;
4105
4106                 /* wait 1 msec */
4107                 if (sleep_flag == CAN_SLEEP)
4108                         usleep_range(1000, 1500);
4109                 else
4110                         mdelay(1);
4111         }
4112
4113         if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4114
4115                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4116                 "restart the adapter assuming the HCB Address points to good F/W\n",
4117                     ioc->name));
4118                 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4119                 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4120                 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4121
4122                 drsprintk(ioc, pr_info(MPT3SAS_FMT
4123                     "re-enable the HCDW\n", ioc->name));
4124                 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4125                     &ioc->chip->HCBSize);
4126         }
4127
4128         drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4129             ioc->name));
4130         writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4131             &ioc->chip->HostDiagnostic);
4132
4133         drsprintk(ioc, pr_info(MPT3SAS_FMT
4134                 "disable writes to the diagnostic register\n", ioc->name));
4135         writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4136
4137         drsprintk(ioc, pr_info(MPT3SAS_FMT
4138                 "Wait for FW to go to the READY state\n", ioc->name));
4139         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4140             sleep_flag);
4141         if (ioc_state) {
4142                 pr_err(MPT3SAS_FMT
4143                         "%s: failed going to ready state (ioc_state=0x%x)\n",
4144                         ioc->name, __func__, ioc_state);
4145                 goto out;
4146         }
4147
4148         pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4149         return 0;
4150
4151  out:
4152         pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4153         return -EFAULT;
4154 }
4155
4156 /**
4157  * _base_make_ioc_ready - put controller in READY state
4158  * @ioc: per adapter object
4159  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4160  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4161  *
4162  * Returns 0 for success, non-zero for failure.
4163  */
4164 static int
4165 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4166         enum reset_type type)
4167 {
4168         u32 ioc_state;
4169         int rc;
4170         int count;
4171
4172         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4173             __func__));
4174
4175         if (ioc->pci_error_recovery)
4176                 return 0;
4177
4178         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4179         dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4180             ioc->name, __func__, ioc_state));
4181
4182         /* if in RESET state, it should move to READY state shortly */
4183         count = 0;
4184         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4185                 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4186                     MPI2_IOC_STATE_READY) {
4187                         if (count++ == 10) {
4188                                 pr_err(MPT3SAS_FMT
4189                                         "%s: failed going to ready state (ioc_state=0x%x)\n",
4190                                     ioc->name, __func__, ioc_state);
4191                                 return -EFAULT;
4192                         }
4193                         if (sleep_flag == CAN_SLEEP)
4194                                 ssleep(1);
4195                         else
4196                                 mdelay(1000);
4197                         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4198                 }
4199         }
4200
4201         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4202                 return 0;
4203
4204         if (ioc_state & MPI2_DOORBELL_USED) {
4205                 dhsprintk(ioc, pr_info(MPT3SAS_FMT
4206                         "unexpected doorbell active!\n",
4207                         ioc->name));
4208                 goto issue_diag_reset;
4209         }
4210
4211         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4212                 mpt3sas_base_fault_info(ioc, ioc_state &
4213                     MPI2_DOORBELL_DATA_MASK);
4214                 goto issue_diag_reset;
4215         }
4216
4217         if (type == FORCE_BIG_HAMMER)
4218                 goto issue_diag_reset;
4219
4220         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4221                 if (!(_base_send_ioc_reset(ioc,
4222                     MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4223                         return 0;
4224                 }
4225
4226  issue_diag_reset:
4227         rc = _base_diag_reset(ioc, CAN_SLEEP);
4228         return rc;
4229 }
4230
4231 /**
4232  * _base_make_ioc_operational - put controller in OPERATIONAL state
4233  * @ioc: per adapter object
4234  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4235  *
4236  * Returns 0 for success, non-zero for failure.
4237  */
4238 static int
4239 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4240 {
4241         int r, i;
4242         unsigned long   flags;
4243         u32 reply_address;
4244         u16 smid;
4245         struct _tr_list *delayed_tr, *delayed_tr_next;
4246         struct adapter_reply_queue *reply_q;
4247         long reply_post_free;
4248         u32 reply_post_free_sz;
4249
4250         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4251             __func__));
4252
4253         /* clean the delayed target reset list */
4254         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4255             &ioc->delayed_tr_list, list) {
4256                 list_del(&delayed_tr->list);
4257                 kfree(delayed_tr);
4258         }
4259
4260
4261         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4262             &ioc->delayed_tr_volume_list, list) {
4263                 list_del(&delayed_tr->list);
4264                 kfree(delayed_tr);
4265         }
4266
4267         /* initialize the scsi lookup free list */
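        /* trackers are handed out with 1-based SMIDs; 0 is never a valid SMID */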
4268         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4269         INIT_LIST_HEAD(&ioc->free_list);
4270         smid = 1;
4271         for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4272                 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4273                 ioc->scsi_lookup[i].cb_idx = 0xFF;
4274                 ioc->scsi_lookup[i].smid = smid;
4275                 ioc->scsi_lookup[i].scmd = NULL;
4276                 list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4277                     &ioc->free_list);
4278         }
4279
4280         /* hi-priority queue */
4281         INIT_LIST_HEAD(&ioc->hpr_free_list);
4282         smid = ioc->hi_priority_smid;
4283         for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4284                 ioc->hpr_lookup[i].cb_idx = 0xFF;
4285                 ioc->hpr_lookup[i].smid = smid;
4286                 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4287                     &ioc->hpr_free_list);
4288         }
4289
4290         /* internal queue */
4291         INIT_LIST_HEAD(&ioc->internal_free_list);
4292         smid = ioc->internal_smid;
4293         for (i = 0; i < ioc->internal_depth; i++, smid++) {
4294                 ioc->internal_lookup[i].cb_idx = 0xFF;
4295                 ioc->internal_lookup[i].smid = smid;
4296                 list_add_tail(&ioc->internal_lookup[i].tracker_list,
4297                     &ioc->internal_free_list);
4298         }
4299
4300         /* chain pool */
4301         INIT_LIST_HEAD(&ioc->free_chain_list);
4302         for (i = 0; i < ioc->chain_depth; i++)
4303                 list_add_tail(&ioc->chain_lookup[i].tracker_list,
4304                     &ioc->free_chain_list);
4305
4306         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4307
4308         /* initialize Reply Free Queue */
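        /* each entry holds the DMA address of one reply frame returned to firmware */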
4309         for (i = 0, reply_address = (u32)ioc->reply_dma ;
4310             i < ioc->reply_free_queue_depth ; i++, reply_address +=
4311             ioc->reply_sz)
4312                 ioc->reply_free[i] = cpu_to_le32(reply_address);
4313
4314         /* initialize reply queues */
4315         if (ioc->is_driver_loading)
4316                 _base_assign_reply_queues(ioc);
4317
4318         /* initialize Reply Post Free Queue */
4319         reply_post_free = (long)ioc->reply_post_free;
4320         reply_post_free_sz = ioc->reply_post_queue_depth *
4321             sizeof(Mpi2DefaultReplyDescriptor_t);
4322         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4323                 reply_q->reply_post_host_index = 0;
4324                 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4325                     reply_post_free;
4326                 for (i = 0; i < ioc->reply_post_queue_depth; i++)
4327                         reply_q->reply_post_free[i].Words =
4328                             cpu_to_le64(ULLONG_MAX);
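                /* without MSI-X only a single reply queue is used, so stop after the first */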
4329                 if (!_base_is_controller_msix_enabled(ioc))
4330                         goto skip_init_reply_post_free_queue;
4331                 reply_post_free += reply_post_free_sz;
4332         }
4333  skip_init_reply_post_free_queue:
4334
4335         r = _base_send_ioc_init(ioc, sleep_flag);
4336         if (r)
4337                 return r;
4338
4339         /* initialize reply free host index */
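        /* point firmware at the last entry so every reply frame starts out free */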
4340         ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4341         writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4342
4343         /* initialize reply post host index */
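        /* write a zero host index tagged with each reply queue's MSI-X index */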
4344         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4345                 writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4346                     &ioc->chip->ReplyPostHostIndex);
4347                 if (!_base_is_controller_msix_enabled(ioc))
4348                         goto skip_init_reply_post_host_index;
4349         }
4350
4351  skip_init_reply_post_host_index:
4352
4353         _base_unmask_interrupts(ioc);
4354         r = _base_event_notification(ioc, sleep_flag);
4355         if (r)
4356                 return r;
4357
4358         if (sleep_flag == CAN_SLEEP)
4359                 _base_static_config_pages(ioc);
4360
4361
4362         if (ioc->is_driver_loading) {
4363                 ioc->wait_for_discovery_to_complete =
4364                     _base_determine_wait_on_discovery(ioc);
4365
4366                 return r; /* scan_start and scan_finished support */
4367         }
4368
4369         r = _base_send_port_enable(ioc, sleep_flag);
4370         if (r)
4371                 return r;
4372
4373         return r;
4374 }
4375
4376 /**
4377  * mpt3sas_base_free_resources - free controller resources
4378  * @ioc: per adapter object
4379  *
4380  * Return nothing.
4381  */
4382 void
4383 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
4384 {
4385         struct pci_dev *pdev = ioc->pdev;
4386
4387         dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4388             __func__));
4389
4390         _base_mask_interrupts(ioc);
4391         ioc->shost_recovery = 1;
4392         _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4393         ioc->shost_recovery = 0;
4394         _base_free_irq(ioc);
4395         _base_disable_msix(ioc);
4396         if (ioc->chip_phys)
4397                 iounmap(ioc->chip);
4398         ioc->chip_phys = 0;
4399         pci_release_selected_regions(ioc->pdev, ioc->bars);
4400         pci_disable_pcie_error_reporting(pdev);
4401         pci_disable_device(pdev);
4402         return;
4403 }
4404
4405 /**
4406  * mpt3sas_base_attach - attach controller instance
4407  * @ioc: per adapter object
4408  *
4409  * Returns 0 for success, non-zero for failure.
4410  */
4411 int
4412 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
4413 {
4414         int r, i;
4415         int cpu_id, last_cpu_id = 0;
4416
4417         dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4418             __func__));
4419
4420         /* setup cpu_msix_table */
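        /* the table maps a CPU number to a reply queue; size it by the highest online CPU id */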
4421         ioc->cpu_count = num_online_cpus();
4422         for_each_online_cpu(cpu_id)
4423                 last_cpu_id = cpu_id;
4424         ioc->cpu_msix_table_sz = last_cpu_id + 1;
4425         ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4426         ioc->reply_queue_count = 1;
4427         if (!ioc->cpu_msix_table) {
4428                 dfailprintk(ioc, pr_info(MPT3SAS_FMT
4429                         "allocation for cpu_msix_table failed!!!\n",
4430                         ioc->name));
4431                 r = -ENOMEM;
4432                 goto out_free_resources;
4433         }
4434
4435         r = mpt3sas_base_map_resources(ioc);
4436         if (r)
4437                 goto out_free_resources;
4438
4439
4440         pci_set_drvdata(ioc->pdev, ioc->shost);
4441         r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4442         if (r)
4443                 goto out_free_resources;
4444
4445         /*
4446          * In SAS3.0,
4447          * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
4448          * Target Status - all require the IEEE formatted scatter gather
4449          * elements.
4450          */
4451
4452         ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
4453         ioc->build_sg = &_base_build_sg_ieee;
4454         ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
4455         ioc->mpi25 = 1;
4456         ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
4457
4458         /*
4459          * These function pointers are for other requests that don't
4460          * require the IEEE scatter gather elements.
4461          *
4462          * For example, Configuration Pages and SAS IOUNIT Control don't.
4463          */
4464         ioc->build_sg_mpi = &_base_build_sg;
4465         ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
4466
4467         r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4468         if (r)
4469                 goto out_free_resources;
4470
4471         ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4472             sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
4473         if (!ioc->pfacts) {
4474                 r = -ENOMEM;
4475                 goto out_free_resources;
4476         }
4477
4478         for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4479                 r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4480                 if (r)
4481                         goto out_free_resources;
4482         }
4483
4484         r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4485         if (r)
4486                 goto out_free_resources;
4487
4488         init_waitqueue_head(&ioc->reset_wq);
4489
4490         /* allocate memory pd handle bitmask list */
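        /* one bit per possible device handle, rounded up to whole bytes */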
4491         ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4492         if (ioc->facts.MaxDevHandle % 8)
4493                 ioc->pd_handles_sz++;
4494         ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4495             GFP_KERNEL);
4496         if (!ioc->pd_handles) {
4497                 r = -ENOMEM;
4498                 goto out_free_resources;
4499         }
4500         ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4501             GFP_KERNEL);
4502         if (!ioc->blocking_handles) {
4503                 r = -ENOMEM;
4504                 goto out_free_resources;
4505         }
4506
4507         ioc->fwfault_debug = mpt3sas_fwfault_debug;
4508
4509         /* base internal command bits */
4510         mutex_init(&ioc->base_cmds.mutex);
4511         ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4512         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4513
4514         /* port_enable command bits */
4515         ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4516         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4517
4518         /* transport internal command bits */
4519         ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4520         ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
4521         mutex_init(&ioc->transport_cmds.mutex);
4522
4523         /* scsih internal command bits */
4524         ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4525         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
4526         mutex_init(&ioc->scsih_cmds.mutex);
4527
4528         /* task management internal command bits */
4529         ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4530         ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
4531         mutex_init(&ioc->tm_cmds.mutex);
4532
4533         /* config page internal command bits */
4534         ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4535         ioc->config_cmds.status = MPT3_CMD_NOT_USED;
4536         mutex_init(&ioc->config_cmds.mutex);
4537
4538         /* ctl module internal command bits */
4539         ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4540         ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4541         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
4542         mutex_init(&ioc->ctl_cmds.mutex);
4543
4544         if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4545             !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4546             !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4547             !ioc->ctl_cmds.sense) {
4548                 r = -ENOMEM;
4549                 goto out_free_resources;
4550         }
4551
4552         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4553                 ioc->event_masks[i] = -1;
4554
4555         /* here we enable the events we care about */
4556         _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4557         _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4558         _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4559         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4560         _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4561         _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4562         _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4563         _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4564         _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4565         _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4566
4567         r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4568         if (r)
4569                 goto out_free_resources;
4570
4571         return 0;
4572
4573  out_free_resources:
4574
4575         ioc->remove_host = 1;
4576
4577         mpt3sas_base_free_resources(ioc);
4578         _base_release_memory_pools(ioc);
4579         pci_set_drvdata(ioc->pdev, NULL);
4580         kfree(ioc->cpu_msix_table);
4581         kfree(ioc->pd_handles);
4582         kfree(ioc->blocking_handles);
4583         kfree(ioc->tm_cmds.reply);
4584         kfree(ioc->transport_cmds.reply);
4585         kfree(ioc->scsih_cmds.reply);
4586         kfree(ioc->config_cmds.reply);
4587         kfree(ioc->base_cmds.reply);
4588         kfree(ioc->port_enable_cmds.reply);
4589         kfree(ioc->ctl_cmds.reply);
4590         kfree(ioc->ctl_cmds.sense);
4591         kfree(ioc->pfacts);
4592         ioc->ctl_cmds.reply = NULL;
4593         ioc->base_cmds.reply = NULL;
4594         ioc->tm_cmds.reply = NULL;
4595         ioc->scsih_cmds.reply = NULL;
4596         ioc->transport_cmds.reply = NULL;
4597         ioc->config_cmds.reply = NULL;
4598         ioc->pfacts = NULL;
4599         return r;
4600 }
4601
4602
4603 /**
4604  * mpt3sas_base_detach - remove controller instance
4605  * @ioc: per adapter object
4606  *
4607  * Return nothing.
4608  */
4609 void
4610 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
4611 {
4612         dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4613             __func__));
4614
4615         mpt3sas_base_stop_watchdog(ioc);
4616         mpt3sas_base_free_resources(ioc);
4617         _base_release_memory_pools(ioc);
4618         pci_set_drvdata(ioc->pdev, NULL);
4619         kfree(ioc->cpu_msix_table);
4620         kfree(ioc->pd_handles);
4621         kfree(ioc->blocking_handles);
4622         kfree(ioc->pfacts);
4623         kfree(ioc->ctl_cmds.reply);
4624         kfree(ioc->ctl_cmds.sense);
4625         kfree(ioc->base_cmds.reply);
4626         kfree(ioc->port_enable_cmds.reply);
4627         kfree(ioc->tm_cmds.reply);
4628         kfree(ioc->transport_cmds.reply);
4629         kfree(ioc->scsih_cmds.reply);
4630         kfree(ioc->config_cmds.reply);
4631 }
4632
4633 /**
4634  * _base_reset_handler - reset callback handler (for base)
4635  * @ioc: per adapter object
4636  * @reset_phase: phase
4637  *
4638  * The handler for doing any required cleanup or initialization.
4639  *
4640  * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
4641  * MPT3_IOC_DONE_RESET
4642  *
4643  * Return nothing.
4644  */
4645 static void
4646 _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
4647 {
4648         mpt3sas_scsih_reset_handler(ioc, reset_phase);
4649         mpt3sas_ctl_reset_handler(ioc, reset_phase);
4650         switch (reset_phase) {
4651         case MPT3_IOC_PRE_RESET:
4652                 dtmprintk(ioc, pr_info(MPT3SAS_FMT
4653                 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
4654                 break;
4655         case MPT3_IOC_AFTER_RESET:
4656                 dtmprintk(ioc, pr_info(MPT3SAS_FMT
4657                 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
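                /* flag internal commands that were pending as interrupted and wake their waiters */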
4658                 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
4659                         ioc->transport_cmds.status |= MPT3_CMD_RESET;
4660                         mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4661                         complete(&ioc->transport_cmds.done);
4662                 }
4663                 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4664                         ioc->base_cmds.status |= MPT3_CMD_RESET;
4665                         mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
4666                         complete(&ioc->base_cmds.done);
4667                 }
4668                 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4669                         ioc->port_enable_failed = 1;
4670                         ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
4671                         mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4672                         if (ioc->is_driver_loading) {
4673                                 ioc->start_scan_failed =
4674                                     MPI2_IOCSTATUS_INTERNAL_ERROR;
4675                                 ioc->start_scan = 0;
4676                                 ioc->port_enable_cmds.status =
4677                                     MPT3_CMD_NOT_USED;
4678                         } else
4679                                 complete(&ioc->port_enable_cmds.done);
4680                 }
4681                 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
4682                         ioc->config_cmds.status |= MPT3_CMD_RESET;
4683                         mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
4684                         ioc->config_cmds.smid = USHRT_MAX;
4685                         complete(&ioc->config_cmds.done);
4686                 }
4687                 break;
4688         case MPT3_IOC_DONE_RESET:
4689                 dtmprintk(ioc, pr_info(MPT3SAS_FMT
4690                         "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
4691                 break;
4692         }
4693 }
4694
4695 /**
4696  * _wait_for_commands_to_complete - wait for pending commands to complete
4697  * @ioc: Pointer to MPT3SAS_ADAPTER structure
4698  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4699  *
4700  * This function waits (up to 10 seconds) for all pending commands to
4701  * complete prior to putting the controller in reset.
4702  */
4703 static void
4704 _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4705 {
4706         u32 ioc_state;
4707         unsigned long flags;
4708         u16 i;
4709
4710         ioc->pending_io_count = 0;
4711         if (sleep_flag != CAN_SLEEP)
4712                 return;
4713
4714         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4715         if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4716                 return;
4717
4718         /* pending command count */
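        /* a tracker with a valid callback index still has a command outstanding */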
4719         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4720         for (i = 0; i < ioc->scsiio_depth; i++)
4721                 if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4722                         ioc->pending_io_count++;
4723         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4724
4725         if (!ioc->pending_io_count)
4726                 return;
4727
4728         /* wait for pending commands to complete */
4729         wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4730 }
4731
4732 /**
4733  * mpt3sas_base_hard_reset_handler - reset controller
4734  * @ioc: Pointer to MPT3SAS_ADAPTER structure
4735  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4736  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4737  *
4738  * Returns 0 for success, non-zero for failure.
4739  */
4740 int
4741 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4742         enum reset_type type)
4743 {
4744         int r;
4745         unsigned long flags;
4746         u32 ioc_state;
4747         u8 is_fault = 0, is_trigger = 0;
4748
4749         dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
4750             __func__));
4751
4752         if (ioc->pci_error_recovery) {
4753                 pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
4754                     ioc->name, __func__);
4755                 r = 0;
4756                 goto out_unlocked;
4757         }
4758
4759         if (mpt3sas_fwfault_debug)
4760                 mpt3sas_halt_firmware(ioc);
4761
4762         /* TODO - What we really should be doing is pulling
4763          * out all the code associated with NO_SLEEP; it's never used.
4764          * That is legacy code from the mpt fusion driver, ported over.
4765          * I will leave this BUG_ON here for now till it's been resolved.
4766          */
4767         BUG_ON(sleep_flag == NO_SLEEP);
4768
4769         /* wait for an active reset in progress to complete */
4770         if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4771                 do {
4772                         ssleep(1);
4773                 } while (ioc->shost_recovery == 1);
4774                 dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
4775                     __func__));
4776                 return ioc->ioc_reset_in_progress_status;
4777         }
4778
4779         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4780         ioc->shost_recovery = 1;
4781         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4782
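        /* remember whether a registered trace buffer should fire a master trigger once the reset succeeds */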
4783         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
4784             MPT3_DIAG_BUFFER_IS_REGISTERED) &&
4785             (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
4786             MPT3_DIAG_BUFFER_IS_RELEASED))) {
4787                 is_trigger = 1;
4788                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4789                 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
4790                         is_fault = 1;
4791         }
4792         _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
4793         _wait_for_commands_to_complete(ioc, sleep_flag);
4794         _base_mask_interrupts(ioc);
4795         r = _base_make_ioc_ready(ioc, sleep_flag, type);
4796         if (r)
4797                 goto out;
4798         _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
4799
4800         /* If this hard reset is called while port enable is active, then
4801          * there is no reason to call make_ioc_operational
4802          */
4803         if (ioc->is_driver_loading && ioc->port_enable_failed) {
4804                 ioc->remove_host = 1;
4805                 r = -EFAULT;
4806                 goto out;
4807         }
4808         r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4809         if (r)
4810                 goto out;
4811         r = _base_make_ioc_operational(ioc, sleep_flag);
4812         if (!r)
4813                 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
4814
4815  out:
4816         dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
4817             ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4818
4819         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4820         ioc->ioc_reset_in_progress_status = r;
4821         ioc->shost_recovery = 0;
4822         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4823         ioc->ioc_reset_count++;
4824         mutex_unlock(&ioc->reset_in_progress_mutex);
4825
4826  out_unlocked:
4827         if ((r == 0) && is_trigger) {
4828                 if (is_fault)
4829                         mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
4830                 else
4831                         mpt3sas_trigger_master(ioc,
4832                             MASTER_TRIGGER_ADAPTER_RESET);
4833         }
4834         dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
4835             __func__));
4836         return r;
4837 }