 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/reboot.h>
31 #include <linux/slab.h>
32 #include <linux/bitops.h>
34 #include <linux/genhd.h>
35 #include <linux/idr.h>
37 #include "rsxx_priv.h"
/* Module identification strings exposed via modinfo. */
42 MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
43 MODULE_AUTHOR("IBM <support@ramsan.com>");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRIVER_VERSION);
/*
 * Read-only (0444) module parameter: requests legacy-style PCI interrupts
 * instead of MSI.  Defaults to NO_LEGACY; presumably consulted before
 * pci_enable_msi() in probe (the check is elided in this listing).
 */
47 static unsigned int force_legacy = NO_LEGACY;
48 module_param(force_legacy, uint, 0444);
49 MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
/* IDA handing out unique disk ids; rsxx_ida_lock serializes access to it. */
51 static DEFINE_IDA(rsxx_disk_ida);
52 static DEFINE_SPINLOCK(rsxx_ida_lock);
54 /*----------------- Interrupt Control & Handling -------------------*/
/*
 * Set the given interrupt bit(s) in *mask.  (Body elided in this listing;
 * presumably "*mask |= intr" -- confirm against the full source.)
 */
55 static void __enable_intr(unsigned int *mask, unsigned int intr)
/*
 * Clear the given interrupt bit(s) from *mask.  (Body elided in this
 * listing; presumably "*mask &= ~intr" -- confirm against the full source.)
 */
60 static void __disable_intr(unsigned int *mask, unsigned int intr)
66 * NOTE: Disabling the IER will disable the hardware interrupt.
67 * Disabling the ISR will disable the software handling of the ISR bit.
69 * Enable/Disable interrupt functions assume the card->irq_lock
70 * is held by the caller.
72 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
74 if (unlikely(card->halt))
77 __enable_intr(&card->ier_mask, intr);
78 iowrite32(card->ier_mask, card->regmap + IER);
81 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
83 __disable_intr(&card->ier_mask, intr);
84 iowrite32(card->ier_mask, card->regmap + IER);
87 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
90 if (unlikely(card->halt))
93 __enable_intr(&card->isr_mask, intr);
94 __enable_intr(&card->ier_mask, intr);
95 iowrite32(card->ier_mask, card->regmap + IER);
97 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
100 __disable_intr(&card->isr_mask, intr);
101 __disable_intr(&card->ier_mask, intr);
102 iowrite32(card->ier_mask, card->regmap + IER);
105 irqreturn_t rsxx_isr(int irq, void *pdata)
107 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) pdata;
113 spin_lock(&card->irq_lock);
118 isr = ioread32(card->regmap + ISR);
119 if (isr == 0xffffffff) {
121 * A few systems seem to have an intermittent issue
122 * where PCI reads return all Fs, but retrying the read
123 * a little later will return as expected.
125 dev_info(CARD_TO_DEV(card),
126 "ISR = 0xFFFFFFFF, retrying later\n");
130 isr &= card->isr_mask;
134 for (i = 0; i < card->n_targets; i++) {
135 if (isr & CR_INTR_DMA(i)) {
136 if (card->ier_mask & CR_INTR_DMA(i)) {
137 rsxx_disable_ier(card, CR_INTR_DMA(i));
140 queue_work(card->ctrl[i].done_wq,
141 &card->ctrl[i].dma_done_work);
146 if (isr & CR_INTR_CREG) {
147 schedule_work(&card->creg_ctrl.done_work);
151 if (isr & CR_INTR_EVENT) {
152 schedule_work(&card->event_work);
153 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
156 } while (reread_isr);
158 spin_unlock(&card->irq_lock);
160 return handled ? IRQ_HANDLED : IRQ_NONE;
163 /*----------------- Card Event Handler -------------------*/
/*
 * Handle a hardware-reported card state transition: log the transition,
 * record the new state, and grow or zero the visible disk capacity
 * depending on the new state.  (Switch header, braces and break/return
 * lines are elided in this listing.)
 */
164 static void card_state_change(struct rsxx_cardinfo *card,
165 unsigned int new_state)
169 dev_info(CARD_TO_DEV(card),
170 "card state change detected.(%s -> %s)\n",
171 rsxx_card_state_to_str(card->state),
172 rsxx_card_state_to_str(new_state));
174 card->state = new_state;
176 /* Don't attach DMA interfaces if the card has an invalid config */
177 if (!card->config_valid)
181 case CARD_STATE_RD_ONLY_FAULT:
182 dev_crit(CARD_TO_DEV(card),
183 "Hardware has entered read-only mode!\n");
185 * Fall through so the DMA devices can be attached and
186 * the user can attempt to pull off their data.
188 case CARD_STATE_GOOD:
189 st = rsxx_get_card_size8(card, &card->size8);
191 dev_err(CARD_TO_DEV(card),
192 "Failed attaching DMA devices\n");
/* >> 9 converts to 512-byte sectors, so size8 is presumably a byte count. */
194 if (card->config_valid)
195 set_capacity(card->gendisk, card->size8 >> 9);
198 case CARD_STATE_FAULT:
199 dev_crit(CARD_TO_DEV(card),
200 "Hardware Fault reported!\n");
203 /* Everything else, detach DMA interface if it's attached. */
204 case CARD_STATE_SHUTDOWN:
205 case CARD_STATE_STARTING:
206 case CARD_STATE_FORMATTING:
207 case CARD_STATE_UNINITIALIZED:
208 case CARD_STATE_SHUTTING_DOWN:
210 * dStroy is a term coined by marketing to represent the low level
 * secure erase.  (Comment truncated in this listing.)
213 case CARD_STATE_DSTROYING:
/* Hide the disk while the card is unusable. */
214 set_capacity(card->gendisk, 0);
/*
 * Workqueue handler scheduled from rsxx_isr() on CR_INTR_EVENT: re-arms
 * the event interrupt, reads the new card state, and processes any state
 * change or pending hardware log.  (Variable declarations and return
 * statements are elided in this listing.)
 */
219 static void card_event_handler(struct work_struct *work)
221 struct rsxx_cardinfo *card;
226 card = container_of(work, struct rsxx_cardinfo, event_work);
/* Bail out if the card is being torn down. */
228 if (unlikely(card->halt))
232 * Enable the interrupt now to avoid any weird race conditions where a
233 * state change might occur while rsxx_get_card_state() is
234 * processing a returned creg cmd.
236 spin_lock_irqsave(&card->irq_lock, flags);
237 rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
238 spin_unlock_irqrestore(&card->irq_lock, flags);
240 st = rsxx_get_card_state(card, &state);
242 dev_info(CARD_TO_DEV(card),
243 "Failed reading state after event.\n");
247 if (card->state != state)
248 card_state_change(card, state);
/* Pull the hardware log if the card flagged one as pending. */
250 if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
251 rsxx_read_hw_log(card);
/*
 * Translate a CARD_STATE_* bit flag into a human-readable name.
 *
 * States are one-hot encoded, so ffs() maps the (single) set bit to a
 * 1-based index into the table; state == 0 maps to index 0 ("Unknown").
 * Fix: ffs() can return up to the bit width of the argument, so an
 * undefined high state bit would have indexed past the 10-entry table;
 * clamp such values to "Unknown" instead of reading out of bounds.
 */
char *rsxx_card_state_to_str(unsigned int state)
{
	static char *state_strings[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};
	int idx = ffs(state);

	if (idx >= (int)(sizeof(state_strings) / sizeof(state_strings[0])))
		idx = 0;

	return state_strings[idx];
}
266 /*----------------- Card Operations -------------------*/
/*
 * Ask the card to shut down and poll (bounded by a 120s timeout per wait
 * phase) until it reports CARD_STATE_SHUTDOWN.  Loop headers, braces and
 * return statements are elided in this listing; presumably returns 0 on
 * success and a negative error on timeout -- confirm against full source.
 *
 * NOTE(review): "jiffies - start < timeout" is jiffy-wraparound-unsafe;
 * time_before() would be the canonical form -- worth confirming upstream.
 */
267 static int card_shutdown(struct rsxx_cardinfo *card)
271 const int timeout = msecs_to_jiffies(120000);
274 /* We can't issue a shutdown if the card is in a transition state */
277 st = rsxx_get_card_state(card, &state);
280 } while (state == CARD_STATE_STARTING &&
281 (jiffies - start < timeout));
283 if (state == CARD_STATE_STARTING)
286 /* Only issue a shutdown if we need to */
287 if ((state != CARD_STATE_SHUTTING_DOWN) &&
288 (state != CARD_STATE_SHUTDOWN)) {
289 st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
/* Poll until the card reports it has finished shutting down. */
296 st = rsxx_get_card_state(card, &state);
299 } while (state != CARD_STATE_SHUTDOWN &&
300 (jiffies - start < timeout));
302 if (state != CARD_STATE_SHUTDOWN)
308 /*----------------- Driver Initialization & Setup -------------------*/
309 /* Returns: 0 if the driver is compatible with the device
310 -1 if the driver is NOT compatible with the device */
311 static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
313 unsigned char pci_rev;
315 pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
317 if (pci_rev > RS70_PCI_REV_SUPPORTED)
/*
 * PCI probe: allocate per-card state, claim PCI resources, map BAR0,
 * set up interrupts, the creg command interface, the DMA engine and the
 * block device, then kick the card toward an operational state.
 * Returns 0 on success or a negative errno (several error-unwind labels
 * and intermediate checks are elided in this listing).
 */
322 static int __devinit rsxx_pci_probe(struct pci_dev *dev,
323 const struct pci_device_id *id)
325 struct rsxx_cardinfo *card;
329 dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
331 card = kzalloc(sizeof(*card), GFP_KERNEL);
336 pci_set_drvdata(dev, card);
/* Allocate a unique disk id; retried while the IDA needs more memory. */
339 if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
344 spin_lock(&rsxx_ida_lock);
345 st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
346 spin_unlock(&rsxx_ida_lock);
347 } while (st == -EAGAIN);
352 st = pci_enable_device(dev);
357 pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
359 st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
361 dev_err(CARD_TO_DEV(card),
362 "No usable DMA configuration,aborting\n");
363 goto failed_dma_mask;
366 st = pci_request_regions(dev, DRIVER_NAME);
368 dev_err(CARD_TO_DEV(card),
369 "Failed to request memory region\n");
370 goto failed_request_regions;
373 if (pci_resource_len(dev, 0) == 0) {
374 dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
379 card->regmap = pci_iomap(dev, 0, 0);
381 dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
/* Mask every interrupt until the handlers below are in place. */
386 spin_lock_init(&card->irq_lock);
389 spin_lock_irqsave(&card->irq_lock, flags);
390 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
391 spin_unlock_irqrestore(&card->irq_lock, flags);
/* MSI failure is only a warning; presumably falls back to legacy INTx. */
394 st = pci_enable_msi(dev);
396 dev_warn(CARD_TO_DEV(card),
397 "Failed to enable MSI\n");
400 st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
403 dev_err(CARD_TO_DEV(card),
404 "Failed requesting IRQ%d\n", dev->irq);
408 /************* Setup Processor Command Interface *************/
409 rsxx_creg_setup(card);
411 spin_lock_irqsave(&card->irq_lock, flags);
412 rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
413 spin_unlock_irqrestore(&card->irq_lock, flags);
415 st = rsxx_compatibility_check(card);
417 dev_warn(CARD_TO_DEV(card),
418 "Incompatible driver detected. Please update the driver.\n");
420 goto failed_compatiblity_check;
423 /************* Load Card Config *************/
424 st = rsxx_load_config(card);
426 dev_err(CARD_TO_DEV(card),
427 "Failed loading card config\n");
429 /************* Setup DMA Engine *************/
430 st = rsxx_get_num_targets(card, &card->n_targets);
432 dev_info(CARD_TO_DEV(card),
433 "Failed reading the number of DMA targets\n");
435 card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
438 goto failed_dma_setup;
441 st = rsxx_dma_setup(card);
443 dev_info(CARD_TO_DEV(card),
444 "Failed to setup DMA engine\n");
445 goto failed_dma_setup;
448 /************* Setup Card Event Handler *************/
449 INIT_WORK(&card->event_work, card_event_handler);
451 st = rsxx_setup_dev(card);
453 goto failed_create_dev;
455 rsxx_get_card_state(card, &card->state);
457 dev_info(CARD_TO_DEV(card),
459 rsxx_card_state_to_str(card->state));
462 * Now that the DMA Engine and devices have been setup,
463 * we can enable the event interrupt(it kicks off actions in
464 * those layers so we couldn't enable it right away.)
466 spin_lock_irqsave(&card->irq_lock, flags);
467 rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
468 spin_unlock_irqrestore(&card->irq_lock, flags);
/* A previously shut-down card needs an explicit STARTUP command. */
470 if (card->state == CARD_STATE_SHUTDOWN) {
471 st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
473 dev_crit(CARD_TO_DEV(card),
474 "Failed issuing card startup\n");
475 } else if (card->state == CARD_STATE_GOOD ||
476 card->state == CARD_STATE_RD_ONLY_FAULT) {
477 st = rsxx_get_card_size8(card, &card->size8);
482 rsxx_attach_dev(card);
/* Error unwind, reverse order of setup (several labels elided here). */
487 rsxx_dma_destroy(card);
489 failed_compatiblity_check:
490 spin_lock_irqsave(&card->irq_lock, flags);
491 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
492 spin_unlock_irqrestore(&card->irq_lock, flags);
493 free_irq(dev->irq, card);
495 pci_disable_msi(dev);
497 pci_iounmap(dev, card->regmap);
499 pci_release_regions(dev);
500 failed_request_regions:
502 pci_disable_device(dev);
504 spin_lock(&rsxx_ida_lock);
505 ida_remove(&rsxx_disk_ida, card->disk_id);
506 spin_unlock(&rsxx_ida_lock);
/*
 * PCI remove callback: detach the block device, shut the card down,
 * quiesce interrupts and work items, then release resources in roughly
 * the reverse order of rsxx_pci_probe().  (Declarations, braces and the
 * final kfree/unwind lines are elided in this listing.)
 */
513 static void __devexit rsxx_pci_remove(struct pci_dev *dev)
515 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
523 dev_info(CARD_TO_DEV(card),
524 "Removing PCI-Flash SSD.\n");
526 rsxx_detach_dev(card);
/* Stop per-target DMA interrupts before asking the card to shut down. */
528 for (i = 0; i < card->n_targets; i++) {
529 spin_lock_irqsave(&card->irq_lock, flags);
530 rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
531 spin_unlock_irqrestore(&card->irq_lock, flags);
534 st = card_shutdown(card);
536 dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
538 /* Sync outstanding event handlers. */
539 spin_lock_irqsave(&card->irq_lock, flags);
540 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
541 spin_unlock_irqrestore(&card->irq_lock, flags);
543 /* Prevent work_structs from re-queuing themselves. */
546 cancel_work_sync(&card->event_work);
548 rsxx_destroy_dev(card);
549 rsxx_dma_destroy(card);
/* All handlers are gone; mask everything and free the IRQ line. */
551 spin_lock_irqsave(&card->irq_lock, flags);
552 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
553 spin_unlock_irqrestore(&card->irq_lock, flags);
554 free_irq(dev->irq, card);
557 pci_disable_msi(dev);
559 rsxx_creg_destroy(card);
561 pci_iounmap(dev, card->regmap);
563 pci_disable_device(dev);
564 pci_release_regions(dev);
/*
 * PM suspend hook.  The driver does not support suspend; the elided body
 * presumably just returns an error code -- confirm against full source.
 */
569 static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
571 /* We don't support suspend at this time. */
/*
 * PCI shutdown hook (system power-off/reboot): detach the block device
 * and stop per-target DMA interrupts.  (Declarations and the tail of the
 * function -- presumably a card_shutdown() call -- are elided in this
 * listing; confirm against the full source.)
 */
575 static void rsxx_pci_shutdown(struct pci_dev *dev)
577 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
584 dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
586 rsxx_detach_dev(card);
588 for (i = 0; i < card->n_targets; i++) {
589 spin_lock_irqsave(&card->irq_lock, flags);
590 rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
591 spin_unlock_irqrestore(&card->irq_lock, flags);
/* PCI IDs claimed by this driver (RS70/RS70D/RS80/RS81 flash cards). */
597 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
598 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
599 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
600 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
601 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
605 MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
/*
 * PCI driver glue binding the callbacks above to the device table.
 * (The .name initializer line is elided in this listing.)
 */
607 static struct pci_driver rsxx_pci_driver = {
609 .id_table = rsxx_pci_ids,
610 .probe = rsxx_pci_probe,
611 .remove = __devexit_p(rsxx_pci_remove),
612 .suspend = rsxx_pci_suspend,
613 .shutdown = rsxx_pci_shutdown,
/*
 * Module init: bring up the dev, DMA and creg subsystems in order, then
 * register the PCI driver.  The goto targets (error-unwind labels) are
 * elided in this listing; each failure presumably tears down the steps
 * that already succeeded -- confirm against the full source.
 */
616 static int __init rsxx_core_init(void)
620 st = rsxx_dev_init();
624 st = rsxx_dma_init();
626 goto dma_init_failed;
628 st = rsxx_creg_init();
630 goto creg_init_failed;
632 return pci_register_driver(&rsxx_pci_driver);
/*
 * Module exit: unregister the PCI driver.  (Subsystem cleanup calls that
 * mirror rsxx_core_init() are elided in this listing.)
 */
642 static void __exit rsxx_core_cleanup(void)
644 pci_unregister_driver(&rsxx_pci_driver);
650 module_init(rsxx_core_init);
651 module_exit(rsxx_core_cleanup);