]> git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/nvme/host/core.c
nvme: provide UUID value to userspace
[karo-tx-linux.git] / drivers / nvme / host / core.c
index 903d5813023a93588c08857ff0db1339bbb99c86..89a7fe422e1a666511656705788b94b9ec9a25a2 100644 (file)
@@ -65,34 +65,29 @@ static bool force_apst;
 module_param(force_apst, bool, 0644);
 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
 
+struct workqueue_struct *nvme_wq;
+EXPORT_SYMBOL_GPL(nvme_wq);
+
 static LIST_HEAD(nvme_ctrl_list);
 static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
-static int nvme_error_status(struct request *req)
+static blk_status_t nvme_error_status(struct request *req)
 {
        switch (nvme_req(req)->status & 0x7ff) {
        case NVME_SC_SUCCESS:
-               return 0;
+               return BLK_STS_OK;
        case NVME_SC_CAP_EXCEEDED:
-               return -ENOSPC;
-       default:
-               return -EIO;
-
-       /*
-        * XXX: these errors are a nasty side-band protocol to
-        * drivers/md/dm-mpath.c:noretry_error() that aren't documented
-        * anywhere..
-        */
-       case NVME_SC_CMD_SEQ_ERROR:
-               return -EILSEQ;
+               return BLK_STS_NOSPC;
        case NVME_SC_ONCS_NOT_SUPPORTED:
-               return -EOPNOTSUPP;
+               return BLK_STS_NOTSUPP;
        case NVME_SC_WRITE_FAULT:
        case NVME_SC_READ_ERROR:
        case NVME_SC_UNWRITTEN_BLOCK:
-               return -ENODATA;
+               return BLK_STS_MEDIUM;
+       default:
+               return BLK_STS_IOERR;
        }
 }
 
@@ -165,7 +160,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_RECONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -291,7 +285,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
        cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }
 
-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
 {
        unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
@@ -300,7 +294,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        __rq_for_each_bio(bio, req) {
                u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -314,7 +308,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
        if (WARN_ON_ONCE(n != segments)) {
                kfree(range);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        memset(cmnd, 0, sizeof(*cmnd));
@@ -328,7 +322,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -372,10 +366,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 }
 
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd)
 {
-       int ret = BLK_MQ_RQ_QUEUE_OK;
+       blk_status_t ret = BLK_STS_OK;
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
                nvme_req(req)->retries = 0;
@@ -402,7 +396,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                break;
        default:
                WARN_ON_ONCE(1);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        cmd->common.command_id = req->tag;
@@ -555,15 +549,16 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                        result, timeout);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, int error)
+static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
 
        blk_mq_free_request(rq);
 
-       if (error) {
+       if (status) {
                dev_err(ctrl->device,
-                       "failed nvme_keep_alive_end_io error=%d\n", error);
+                       "failed nvme_keep_alive_end_io error=%d\n",
+                               status);
                return;
        }
 
@@ -643,6 +638,77 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
        return error;
 }
 
+static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
+{
+       struct nvme_command c = { };
+       int status;
+       void *data;
+       int pos;
+       int len;
+
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cpu_to_le32(nsid);
+       c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
+
+       data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
+                                     NVME_IDENTIFY_DATA_SIZE);
+       if (status)
+               goto free_data;
+
+       for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+               struct nvme_ns_id_desc *cur = data + pos;
+
+               if (cur->nidl == 0)
+                       break;
+
+               switch (cur->nidt) {
+               case NVME_NIDT_EUI64:
+                       if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_EUI64_LEN;
+                       memcpy(ns->eui, data + pos + sizeof(*cur), len);
+                       break;
+               case NVME_NIDT_NGUID:
+                       if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_NGUID_LEN;
+                       memcpy(ns->nguid, data + pos + sizeof(*cur), len);
+                       break;
+               case NVME_NIDT_UUID:
+                       if (cur->nidl != NVME_NIDT_UUID_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_UUID_LEN;
+                       uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
+                       break;
+               default:
+                       /* Skip unknown types */
+                       len = cur->nidl;
+                       break;
+               }
+
+               len += sizeof(*cur);
+       }
+free_data:
+       kfree(data);
+       return status;
+}
+
 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
 {
        struct nvme_command c = { };
@@ -1016,7 +1082,15 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
        if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
                memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
        if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
-               memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+               memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
+       if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
+                /* Don't treat an error as fatal, as we potentially
+                 * already have an NGUID or EUI-64 from the Identify
+                 * Namespace data structure.
+                 */
+               if (nvme_identify_ns_descs(ns, ns->ns_id))
+                       dev_warn(ns->ctrl->device,
+                                "%s: Identify Descriptors failed\n", __func__);
+       }
 
        return 0;
 }
@@ -1640,6 +1714,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                }
        } else {
                ctrl->cntlid = le16_to_cpu(id->cntlid);
+               ctrl->hmpre = le32_to_cpu(id->hmpre);
+               ctrl->hmmin = le32_to_cpu(id->hmmin);
        }
 
        kfree(id);
@@ -1787,8 +1863,8 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
        int serial_len = sizeof(ctrl->serial);
        int model_len = sizeof(ctrl->model);
 
-       if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
-               return sprintf(buf, "eui.%16phN\n", ns->uuid);
+       if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+               return sprintf(buf, "eui.%16phN\n", ns->nguid);
 
        if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
                return sprintf(buf, "eui.%8phN\n", ns->eui);
@@ -1803,11 +1879,28 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
 
+static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+       return sprintf(buf, "%pU\n", ns->nguid);
+}
+static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
+
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
                                                                char *buf)
 {
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       return sprintf(buf, "%pU\n", ns->uuid);
+
+       /* For backward compatibility expose the NGUID to userspace if
+        * we have no UUID set
+        */
+       if (uuid_is_null(&ns->uuid)) {
+               printk_ratelimited(KERN_WARNING
+                                  "No UUID available providing old NGUID\n");
+               return sprintf(buf, "%pU\n", ns->nguid);
+       }
+       return sprintf(buf, "%pU\n", &ns->uuid);
 }
 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
 
@@ -1830,6 +1923,7 @@ static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
 static struct attribute *nvme_ns_attrs[] = {
        &dev_attr_wwid.attr,
        &dev_attr_uuid.attr,
+       &dev_attr_nguid.attr,
        &dev_attr_eui.attr,
        &dev_attr_nsid.attr,
        NULL,
@@ -1842,7 +1936,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
        if (a == &dev_attr_uuid.attr) {
-               if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+               if (uuid_is_null(&ns->uuid) ||
+                   !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+                       return 0;
+       }
+       if (a == &dev_attr_nguid.attr) {
+               if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
                        return 0;
        }
        if (a == &dev_attr_eui.attr) {
@@ -2231,7 +2330,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
         * removal.
         */
        if (ctrl->state == NVME_CTRL_LIVE)
-               schedule_work(&ctrl->scan_work);
+               queue_work(nvme_wq, &ctrl->scan_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
@@ -2286,7 +2385,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                /*FALLTHRU*/
        case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
-               schedule_work(&ctrl->async_event_work);
+               queue_work(nvme_wq, &ctrl->async_event_work);
                break;
        default:
                break;
@@ -2309,7 +2408,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
        ctrl->event_limit = NVME_NR_AERS;
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
 
@@ -2544,10 +2643,15 @@ int __init nvme_core_init(void)
 {
        int result;
 
+       nvme_wq = alloc_workqueue("nvme-wq",
+                       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+       if (!nvme_wq)
+               return -ENOMEM;
+
        result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
                                                        &nvme_dev_fops);
        if (result < 0)
-               return result;
+               goto destroy_wq;
        else if (result > 0)
                nvme_char_major = result;
 
@@ -2559,8 +2663,10 @@ int __init nvme_core_init(void)
 
        return 0;
 
- unregister_chrdev:
+unregister_chrdev:
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+destroy_wq:
+       destroy_workqueue(nvme_wq);
        return result;
 }
 
@@ -2568,6 +2674,7 @@ void nvme_core_exit(void)
 {
        class_destroy(nvme_class);
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+       destroy_workqueue(nvme_wq);
 }
 
 MODULE_LICENSE("GPL");