Merge branch 'for-linus' of git://git.kernel.dk/linux-block
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 21 Oct 2016 17:54:01 +0000 (10:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 21 Oct 2016 17:54:01 +0000 (10:54 -0700)
Pull block fixes from Jens Axboe:
 "A set of fixes that missed the merge window, mostly due to me being
  away around that time.

  Nothing major here, a mix of nvme cleanups and fixes, and one fix for
  the badblocks handling"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvmet: use symbolic constants for CNS values
  nvme: use symbolic constants for CNS values
  nvme.h: add an enum for cns values
  nvme.h: don't use uuid_be
  nvme.h: resync with nvme-cli
  nvme: Add tertiary number to NVME_VS
  nvme : Add sysfs entry for NVMe CMBs when appropriate
  nvme: don't schedule multiple resets
  nvme: Delete created IO queues on reset
  nvme: Stop probing a removed device
  badblocks: fix overlapping check for clearing
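
For readers skimming the diff below, the "nvme: don't schedule multiple resets"
change funnels every reset request through nvme_reset(), which now bails out
while a reset is already pending or running, and moves the synchronous flush
into the one caller that needs it. The sketch below is a minimal restatement of
that pattern using the names from the diff; the surrounding driver boilerplate
(struct nvme_dev, nvme_workq, the reset work function) is assumed and omitted:

    /* Gatekeeper: callers queue a reset through nvme_reset() instead of
     * poking dev->reset_work directly. */
    static int nvme_reset(struct nvme_dev *dev)
    {
            /* Device already torn down: the admin queue is gone or dying. */
            if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                    return -ENODEV;
            /* A reset is already pending or running; don't pile up more. */
            if (work_busy(&dev->reset_work))
                    return -ENODEV;
            if (!queue_work(nvme_workq, &dev->reset_work))
                    return -EBUSY;
            return 0;
    }

    /* Callers that need synchronous behaviour (the ->reset_ctrl op) flush
     * the work only when it was actually queued. */
    static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
    {
            struct nvme_dev *dev = to_nvme_dev(ctrl);
            int ret = nvme_reset(dev);

            if (!ret)
                    flush_work(&dev->reset_work);
            return ret;
    }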

drivers/nvme/host/pci.c

diff --combined drivers/nvme/host/pci.c
index 0fc99f0f257110a063f3e58722b2f0ec08da655e,26a8d31b291ddfc5b12b7425c9614db1957929a2..0248d0e21feedb706b818b01cbfeea164322570c
@@@ -99,6 -99,7 +99,7 @@@ struct nvme_dev 
        dma_addr_t cmb_dma_addr;
        u64 cmb_size;
        u32 cmbsz;
+       u32 cmbloc;
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
  };
@@@ -515,8 -516,7 +516,8 @@@ static int nvme_map_data(struct nvme_de
                goto out;
  
        ret = BLK_MQ_RQ_QUEUE_BUSY;
 -      if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
 +      if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
 +                              DMA_ATTR_NO_WARN))
                goto out;
  
        if (!nvme_setup_prps(dev, req, size))
@@@ -893,7 -893,7 +894,7 @@@ static enum blk_eh_timer_return nvme_ti
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
-               queue_work(nvme_workq, &dev->reset_work);
+               nvme_reset(dev);
  
                /*
                 * Mark the request as handled, since the inline shutdown
@@@ -1214,7 -1214,7 +1215,7 @@@ static int nvme_configure_admin_queue(s
        u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
        struct nvme_queue *nvmeq;
  
-       dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+       dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
                                                NVME_CAP_NSSRC(cap) : 0;
  
        if (dev->subsystem &&
@@@ -1291,7 -1291,7 +1292,7 @@@ static void nvme_watchdog_timer(unsigne
  
        /* Skip controllers under certain specific conditions. */
        if (nvme_should_reset(dev, csts)) {
-               if (queue_work(nvme_workq, &dev->reset_work))
+               if (!nvme_reset(dev))
                        dev_warn(dev->dev,
                                "Failed status: 0x%x, reset controller.\n",
                                csts);
@@@ -1331,28 -1331,37 +1332,37 @@@ static int nvme_create_io_queues(struc
        return ret >= 0 ? 0 : ret;
  }
  
+ static ssize_t nvme_cmb_show(struct device *dev,
+                            struct device_attribute *attr,
+                            char *buf)
+ {
+       struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+       return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz  : x%08x\n",
+                      ndev->cmbloc, ndev->cmbsz);
+ }
+ static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
  static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
  {
        u64 szu, size, offset;
-       u32 cmbloc;
        resource_size_t bar_size;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        void __iomem *cmb;
        dma_addr_t dma_addr;
  
-       if (!use_cmb_sqes)
-               return NULL;
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!(NVME_CMB_SZ(dev->cmbsz)))
                return NULL;
+       dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
  
-       cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+       if (!use_cmb_sqes)
+               return NULL;
  
        szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
        size = szu * NVME_CMB_SZ(dev->cmbsz);
-       offset = szu * NVME_CMB_OFST(cmbloc);
-       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+       offset = szu * NVME_CMB_OFST(dev->cmbloc);
+       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
  
        if (offset > bar_size)
                return NULL;
        if (size > bar_size - offset)
                size = bar_size - offset;
  
-       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
        cmb = ioremap_wc(dma_addr, size);
        if (!cmb)
                return NULL;
@@@ -1511,9 -1520,9 +1521,9 @@@ static int nvme_delete_queue(struct nvm
        return 0;
  }
  
- static void nvme_disable_io_queues(struct nvme_dev *dev)
+ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
  {
-       int pass, queues = dev->online_queues - 1;
+       int pass;
        unsigned long timeout;
        u8 opcode = nvme_admin_delete_sq;
  
@@@ -1616,9 -1625,25 +1626,25 @@@ static int nvme_pci_enable(struct nvme_
                        dev->q_depth);
        }
  
-       if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+       /*
+        * CMBs can currently only exist on >=1.2 PCIe devices. We only
+        * populate sysfs if a CMB is implemented. Note that we add the
+        * CMB attribute to the nvme_ctrl kobj which removes the need to remove
+        * it on exit. Since nvme_dev_attrs_group has no name we can pass
+        * NULL as final argument to sysfs_add_file_to_group.
+        */
+       if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
                dev->cmb = nvme_map_cmb(dev);
  
+               if (dev->cmbsz) {
+                       if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+                                                   &dev_attr_cmb.attr, NULL))
+                               dev_warn(dev->dev,
+                                        "failed to add sysfs attribute for CMB\n");
+               }
+       }
        pci_enable_pcie_error_reporting(pdev);
        pci_save_state(pdev);
        return 0;
@@@ -1649,7 -1674,7 +1675,7 @@@ static void nvme_pci_disable(struct nvm
  
  static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
  {
-       int i;
+       int i, queues;
        u32 csts = -1;
  
        del_timer_sync(&dev->watchdog_timer);
                csts = readl(dev->bar + NVME_REG_CSTS);
        }
  
+       queues = dev->online_queues - 1;
        for (i = dev->queue_count - 1; i > 0; i--)
                nvme_suspend_queue(dev->queues[i]);
  
                if (dev->queue_count)
                        nvme_suspend_queue(dev->queues[0]);
        } else {
-               nvme_disable_io_queues(dev);
+               nvme_disable_io_queues(dev, queues);
                nvme_disable_admin_queue(dev, shutdown);
        }
        nvme_pci_disable(dev);
@@@ -1818,11 -1844,10 +1845,10 @@@ static int nvme_reset(struct nvme_dev *
  {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
+       if (work_busy(&dev->reset_work))
+               return -ENODEV;
        if (!queue_work(nvme_workq, &dev->reset_work))
                return -EBUSY;
-       flush_work(&dev->reset_work);
        return 0;
  }
  
@@@ -1846,7 -1871,12 +1872,12 @@@ static int nvme_pci_reg_read64(struct n
  
  static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
  {
-       return nvme_reset(to_nvme_dev(ctrl));
+       struct nvme_dev *dev = to_nvme_dev(ctrl);
+       int ret = nvme_reset(dev);
+       if (!ret)
+               flush_work(&dev->reset_work);
+       return ret;
  }
  
  static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@@ -1940,7 -1970,7 +1971,7 @@@ static void nvme_reset_notify(struct pc
        if (prepare)
                nvme_dev_disable(dev, false);
        else
-               queue_work(nvme_workq, &dev->reset_work);
+               nvme_reset(dev);
  }
  
  static void nvme_shutdown(struct pci_dev *pdev)
@@@ -2009,7 -2039,7 +2040,7 @@@ static int nvme_resume(struct device *d
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
  
-       queue_work(nvme_workq, &ndev->reset_work);
+       nvme_reset(ndev);
        return 0;
  }
  #endif
@@@ -2048,7 -2078,7 +2079,7 @@@ static pci_ers_result_t nvme_slot_reset
  
        dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
-       queue_work(nvme_workq, &dev->reset_work);
+       nvme_reset(dev);
        return PCI_ERS_RESULT_RECOVERED;
  }
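
A note on the NVME_VS(1, 1, 0) / NVME_VS(1, 2, 0) comparisons above: the
"nvme: Add tertiary number to NVME_VS" commit extends the version macro to
carry the tertiary field of the controller's VS register, so spec revisions
such as 1.2.1 can be expressed. The macro itself lives in nvme.h and is not
part of this diff; the sketch below assumes the usual VS register layout
(major in bits 31:16, minor in bits 15:8, tertiary in bits 7:0):

    /* Assumed shape of the helper after this series (defined in nvme.h,
     * not shown in this diff). */
    #define NVME_VS(major, minor, tertiary) \
            (((major) << 16) | ((minor) << 8) | (tertiary))

    /* Example: a controller reporting VS = 0x00010200 (NVMe 1.2.0) passes
     * readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0), so the CMB
     * registers are probed and, if a CMB is present, the "cmb" sysfs
     * attribute is created. */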