NVMe: Remove unused variables
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index bb2b861cfed9f4afb569567e3a524d333d43e09f..cf2d8e3c93a88b9812cf764ee8a30d4832e36c78 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -49,7 +49,6 @@
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT          (admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
-#define IOD_TIMEOUT            (retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -59,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
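
The deleted retry_time knob (together with its IOD_TIMEOUT wrapper in the previous hunk) belonged to the driver's private retry timer, which is gone now that the blk-mq timeout (io_timeout) governs request lifetimes on its own. For reference, a minimal standalone sketch of the pattern the surviving parameters follow; example_timeout is a hypothetical name, not a real knob:

    #include <linux/module.h>

    /* A byte-wide module parameter, writable at runtime through
     * /sys/module/<module>/parameters/example_timeout, scaled to
     * jiffies at the point of use.
     */
    static unsigned char example_timeout = 30;
    module_param(example_timeout, byte, 0644);
    MODULE_PARM_DESC(example_timeout, "timeout in seconds for example ops");

    #define EXAMPLE_TIMEOUT (example_timeout * HZ)  /* seconds -> jiffies */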
@@ -81,7 +76,6 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
 
 static struct class *nvme_class;
 
@@ -102,7 +96,6 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-       struct llist_node node;
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];       /* nvme4294967295-65535\0 */
@@ -1274,29 +1267,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       /*
-        * The aborted req will be completed on receiving the abort req.
-        * We enable the timer again. If hit twice, it'll cause a device reset,
-        * as the device then is in a faulty state.
-        */
-       int ret = BLK_EH_RESET_TIMER;
-
        dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
                                                        nvmeq->qid);
-
        spin_lock_irq(&nvmeq->q_lock);
-       if (!nvmeq->dev->initialized) {
-               /*
-                * Force cancelled command frees the request, which requires we
-                * return BLK_EH_NOT_HANDLED.
-                */
-               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-               ret = BLK_EH_NOT_HANDLED;
-       } else
-               nvme_abort_req(req);
+       nvme_abort_req(req);
        spin_unlock_irq(&nvmeq->q_lock);
 
-       return ret;
+       /*
+        * The aborted req will be completed on receiving the abort req.
+        * We enable the timer again. If hit twice, it'll cause a device reset,
+        * as the device then is in a faulty state.
+        */
+       return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
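
With the dev->initialized flag gone (its removals appear in the nvme_kthread, nvme_dev_shutdown, nvme_dev_resume and nvme_probe hunks below), the timeout handler no longer needs a force-cancel branch: it always issues an abort and rearms the timer, and forced cancellation happens exactly once, in nvme_clear_queue(), after the device is quiesced. A minimal sketch of the blk-mq contract this relies on; start_async_abort() is a hypothetical stand-in for nvme_abort_req():

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Hypothetical: queues an asynchronous abort for the request. */
    static void start_async_abort(struct request *req);

    static enum blk_eh_timer_return example_timeout(struct request *req,
                                                    bool reserved)
    {
            start_async_abort(req);         /* recovery runs asynchronously */

            /* BLK_EH_RESET_TIMER: the driver keeps the request and blk-mq
             * rearms the timer; a later completion (or a controller reset
             * after a second expiry) finishes it.  BLK_EH_NOT_HANDLED --
             * what the deleted branch returned -- means the handler has
             * already completed and freed the request itself.
             */
            return BLK_EH_RESET_TIMER;
    }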
@@ -1349,7 +1331,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
        struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
 
        spin_lock_irq(&nvmeq->q_lock);
-       nvme_process_cq(nvmeq);
        if (hctx && hctx->tags)
                blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
@@ -1372,7 +1353,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
        }
        if (!qid && dev->admin_q)
                blk_mq_freeze_queue_start(dev->admin_q);
-       nvme_clear_queue(nvmeq);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -2121,8 +2105,7 @@ static int nvme_kthread(void *data)
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
-                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
-                                                       dev->initialized) {
+                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
                                if (work_busy(&dev->reset_work))
                                        continue;
                                list_del_init(&dev->node);
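
With teardown reordered below, the initialized flag the kthread used to check is redundant: the poll loop can presumably act on a fatal-status (CSTS.CFS) read unconditionally, since nvme_dev_shutdown() takes the device off dev_list before quiescing it.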
@@ -2525,8 +2508,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
        struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
-       nvme_clear_queue(nvmeq);
        nvme_put_dq(dq);
 }
 
@@ -2669,7 +2650,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        int i;
        u32 csts = -1;
 
-       dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
        if (dev->bar) {
@@ -2680,7 +2660,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
                        nvme_suspend_queue(nvmeq);
-                       nvme_clear_queue(nvmeq);
                }
        } else {
                nvme_disable_io_queues(dev);
@@ -2688,6 +2667,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                nvme_disable_queue(dev, 0);
        }
        nvme_dev_unmap(dev);
+
+       for (i = dev->queue_count - 1; i >= 0; i--)
+               nvme_clear_queue(dev->queues[i]);
 }
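
This hunk is the pivot of the reordering: nvme_clear_queue(), which force-cancels outstanding requests, moves out of nvme_disable_queue(), nvme_del_queue_end() and the per-queue suspend loop, and now runs exactly once per queue only after nvme_dev_unmap(). Once the BAR is unmapped the controller can post no further completions, so cancellation cannot race with a real completion arriving from hardware.

    /* Condensed sketch of the resulting order on the dead-controller
     * path; not verbatim driver code.
     */
    for (i = dev->queue_count - 1; i >= 0; i--)
            nvme_suspend_queue(dev->queues[i]);     /* mask irq, go offline  */
    nvme_dev_unmap(dev);                    /* BAR gone: no more completions */
    for (i = dev->queue_count - 1; i >= 0; i--)
            nvme_clear_queue(dev->queues[i]);       /* cancel the leftovers  */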
 
 static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2800,6 +2782,10 @@ static int nvme_dev_open(struct inode *inode, struct file *f)
        spin_lock(&dev_list_lock);
        list_for_each_entry(dev, &dev_list, node) {
                if (dev->instance == instance) {
+                       if (!dev->admin_q) {
+                               ret = -EWOULDBLOCK;
+                               break;
+                       }
                        if (!kref_get_unless_zero(&dev->kref))
                                break;
                        f->private_data = dev;
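
Because bring-up is now asynchronous (see nvme_async_probe() below), the character device can be opened before the admin queue exists. Returning -EWOULDBLOCK gives userspace a retryable error instead of letting a subsequent ioctl run against a NULL admin_q.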
@@ -2951,7 +2937,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                nvme_unfreeze_queues(dev);
                nvme_set_irq_hints(dev);
        }
-       dev->initialized = 1;
        return 0;
 }
 
@@ -2982,6 +2967,7 @@ static void nvme_reset_workfn(struct work_struct *work)
        dev->reset_workfn(work);
 }
 
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
@@ -3017,34 +3003,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto release;
 
        kref_init(&dev->kref);
-       result = nvme_dev_start(dev);
-       if (result)
-               goto release_pools;
-
        dev->device = device_create(nvme_class, &pdev->dev,
                                MKDEV(nvme_char_major, dev->instance),
                                dev, "nvme%d", dev->instance);
        if (IS_ERR(dev->device)) {
                result = PTR_ERR(dev->device);
-               goto shutdown;
+               goto release_pools;
        }
        get_device(dev->device);
 
-       if (dev->online_queues > 1)
-               result = nvme_dev_add(dev);
-       if (result)
-               goto device_del;
-
-       nvme_set_irq_hints(dev);
-       dev->initialized = 1;
+       INIT_WORK(&dev->probe_work, nvme_async_probe);
+       schedule_work(&dev->probe_work);
        return 0;
 
- device_del:
-       device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
- shutdown:
-       nvme_dev_shutdown(dev);
  release_pools:
-       nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
  release:
        nvme_release_instance(dev);
@@ -3057,6 +3029,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return result;
 }
 
+static void nvme_async_probe(struct work_struct *work)
+{
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+       int result;
+
+       result = nvme_dev_start(dev);
+       if (result)
+               goto reset;
+
+       if (dev->online_queues > 1)
+               result = nvme_dev_add(dev);
+       if (result)
+               goto reset;
+
+       nvme_set_irq_hints(dev);
+       return;
+ reset:
+       if (!work_busy(&dev->reset_work)) {
+               dev->reset_workfn = nvme_reset_failed_dev;
+               queue_work(nvme_workq, &dev->reset_work);
+       }
+}
+
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
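
nvme_probe() is now split: only cheap, synchronous setup (instance, BAR map, pools, char device) stays on the PCI probe path, while the failure-prone controller bring-up runs from probe_work. Errors in the async path no longer unwind probe; they are funneled into the existing reset machinery via reset_work. A generic sketch of the pattern, with example_* names standing in for the nvme_* pieces:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    /* struct example_dev and the example_* helpers are hypothetical
     * stand-ins for nvme_dev, nvme_dev_start()/nvme_dev_add() and the
     * reset machinery.
     */
    struct example_dev {
            struct work_struct probe_work;
            /* ... */
    };

    static int example_bring_up(struct example_dev *dev);           /* hypothetical */
    static void example_schedule_recovery(struct example_dev *dev); /* hypothetical */

    static void example_async_probe(struct work_struct *work)
    {
            struct example_dev *dev =
                    container_of(work, struct example_dev, probe_work);

            /* The heavy, failure-prone bring-up runs here, off the probe
             * path; on error, reuse the recovery path rather than trying
             * to unwind a probe that has already returned 0.
             */
            if (example_bring_up(dev))
                    example_schedule_recovery(dev);
    }

    static int example_probe(void)
    {
            struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

            if (!dev)
                    return -ENOMEM;
            /* ...cheap setup only: ids, char device, refcounts... */
            INIT_WORK(&dev->probe_work, example_async_probe);
            schedule_work(&dev->probe_work);
            return 0;       /* probe succeeds; bring-up continues async */
    }

The trade-off is that open(), reset and remove can now race a half-initialized device, which is what the -EWOULDBLOCK check above and the flush_work() below account for.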
@@ -3082,6 +3077,7 @@ static void nvme_remove(struct pci_dev *pdev)
        spin_unlock(&dev_list_lock);
 
        pci_set_drvdata(pdev, NULL);
+       flush_work(&dev->probe_work);
        flush_work(&dev->reset_work);
        nvme_dev_shutdown(dev);
        nvme_dev_remove(dev);
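
The new flush_work(&dev->probe_work) pairs with the schedule_work() in nvme_probe(): remove waits for the asynchronous bring-up (or its failure handling) to finish before shutting the controller down, so probe and remove never overlap.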
@@ -3200,7 +3196,6 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       unregister_hotcpu_notifier(&nvme_nb);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
        class_destroy(nvme_class);