/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_AQ_DEPTH              256

#define NVME_LOOP_MAX_SEGMENTS          256

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_LOOP_NR_AEN_COMMANDS       1
#define NVME_LOOP_AQ_BLKMQ_DEPTH        \
        (NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)

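/*
 * Per-request context: the host-side NVMe command/completion pair plus the
 * target-side nvmet_req that is executed in process context via 'work'.
 * first_sgl is sized through the tag set's cmd_size.
 */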
struct nvme_loop_iod {
        struct nvme_request     nvme_req;
        struct nvme_command     cmd;
        struct nvme_completion  rsp;
        struct nvmet_req        req;
        struct nvme_loop_queue  *queue;
        struct work_struct      work;
        struct sg_table         sg_table;
        struct scatterlist      first_sgl[];
};

struct nvme_loop_ctrl {
        spinlock_t              lock;
        struct nvme_loop_queue  *queues;
        u32                     queue_count;

        struct blk_mq_tag_set   admin_tag_set;

        struct list_head        list;
        u64                     cap;
        struct blk_mq_tag_set   tag_set;
        struct nvme_loop_iod    async_event_iod;
        struct nvme_ctrl        ctrl;

        struct nvmet_ctrl       *target_ctrl;
        struct work_struct      delete_work;
        struct work_struct      reset_work;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

struct nvme_loop_queue {
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
        struct nvme_loop_ctrl   *ctrl;
};

static struct nvmet_port *nvmet_loop_port;

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

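/*
 * blk-mq ->complete handler: release command resources, retry if the core
 * asks for it, and translate the NVMe status into a block layer error.
 */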
static void nvme_loop_complete_rq(struct request *req)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        int error = 0;

        nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, true);

        if (unlikely(req->errors)) {
                if (nvme_req_needs_retry(req, req->errors)) {
                        nvme_requeue_req(req);
                        return;
                }

                if (blk_rq_is_passthrough(req))
                        error = req->errors;
                else
                        error = nvme_error_status(req->errors);
        }

        blk_mq_end_request(req, error);
}

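/*
 * Map a loop queue to the blk-mq tag set that owns its requests: the admin
 * tag set for queue 0, the I/O tag set otherwise.
 */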
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
        u32 queue_idx = nvme_loop_queue_idx(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
        struct nvme_loop_queue *queue =
                container_of(req->sq, struct nvme_loop_queue, nvme_sq);
        struct nvme_completion *cqe = req->rsp;

        /*
         * AEN requests are special as they don't time out and can
         * survive any kind of queue freeze and often don't respond to
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
        if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {
                struct request *rq;
                struct nvme_loop_iod *iod;

                rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
                                "tag 0x%x on queue %d not found\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }

                iod = blk_mq_rq_to_pdu(rq);
                iod->nvme_req.result = cqe->result;
                blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
        }
}

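/*
 * Work item that executes the nvmet request in process context; the
 * completion comes back through nvme_loop_queue_response().
 */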
static void nvme_loop_execute_work(struct work_struct *work)
{
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);

        iod->req.execute(&iod->req);
}

static enum blk_eh_timer_return
nvme_loop_timeout(struct request *rq, bool reserved)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);

        /* queue error recovery */
        schedule_work(&iod->queue->ctrl->reset_work);

        /* fail with DNR on admin cmd timeout */
        rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;

        return BLK_EH_HANDLED;
}

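/*
 * blk-mq ->queue_rq handler: translate the block request into an NVMe
 * command, initialise the target-side nvmet_req against the loop port,
 * map the data into a chained scatterlist and hand the command to the
 * nvmet core via the iod's work item.
 */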
static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        int ret;

        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;

        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = nvmet_loop_port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvme_loop_ops)) {
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
                return BLK_MQ_RQ_QUEUE_OK;
        }

        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                ret = sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
                                iod->sg_table.sgl);
                if (ret)
                        return BLK_MQ_RQ_QUEUE_BUSY;

                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
        }

        blk_mq_start_request(req);

        schedule_work(&iod->work);
        return BLK_MQ_RQ_QUEUE_OK;
}

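/*
 * Submit an Async Event Request directly on the admin queue.  AEN commands
 * use command IDs above the blk-mq depth, so no struct request is involved.
 */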
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];
        struct nvme_loop_iod *iod = &ctrl->async_event_iod;

        memset(&iod->cmd, 0, sizeof(iod->cmd));
        iod->cmd.common.opcode = nvme_admin_async_event;
        iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

        if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
                        &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }

        schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
{
        iod->req.cmd = &iod->cmd;
        iod->req.rsp = &iod->rsp;
        iod->queue = &ctrl->queues[queue_idx];
        INIT_WORK(&iod->work, nvme_loop_execute_work);
        return 0;
}

static int nvme_loop_init_request(void *data, struct request *req,
                                unsigned int hctx_idx, unsigned int rq_idx,
                                unsigned int numa_node)
{
        return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
}

static int nvme_loop_init_admin_request(void *data, struct request *req,
                                unsigned int hctx_idx, unsigned int rq_idx,
                                unsigned int numa_node)
{
        return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

        BUG_ON(hctx_idx >= ctrl->queue_count);

        hctx->driver_data = queue;
        return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[0];

        BUG_ON(hctx_idx != 0);

        hctx->driver_data = queue;
        return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_hctx,
        .timeout        = nvme_loop_timeout,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
        .init_request   = nvme_loop_init_admin_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (nctrl->tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
        }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->queue_count; i++)
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret, i;

        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret || !nr_io_queues)
                return ret;

        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
                if (ret)
                        goto out_destroy_queues;
                ctrl->queue_count++;
        }

        return 0;

out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
        }

        return 0;
}

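/*
 * Set up the admin tag set and queue, connect the admin queue to the
 * target, read CAP, enable the controller and identify it.  Failures are
 * unwound in reverse order through the out_* labels.
 */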
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        int error;

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

        ctrl->queues[0].ctrl = ctrl;
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
        if (error)
                return error;
        ctrl->queue_count = 1;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_free_sq;

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_free_tagset;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
                        "prop_get NVME_REG_CAP failed\n");
                goto out_cleanup_queue;
        }

        ctrl->ctrl.sqsize =
                min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        nvme_start_keep_alive(&ctrl->ctrl);

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        return error;
}

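/*
 * Tear down a live controller: stop keep-alive, cancel outstanding I/O and
 * admin commands, and destroy the target-side queues.
 */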
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
        nvme_stop_keep_alive(&ctrl->ctrl);

        if (ctrl->queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_loop_destroy_io_queues(ctrl);
        }

        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_del_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);

        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;

        if (!schedule_work(&ctrl->delete_work))
                return -EBUSY;

        return 0;
}

static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
        int ret;

        ret = __nvme_loop_del_ctrl(ctrl);
        if (ret)
                return ret;

        flush_work(&ctrl->delete_work);

        return 0;
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
                if (ctrl->ctrl.cntlid == nctrl->cntlid)
                        __nvme_loop_del_ctrl(ctrl);
        }
        mutex_unlock(&nvme_loop_ctrl_mutex);
}

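/*
 * Controller reset: shut everything down, then rebuild the admin and I/O
 * queues and return the controller to the LIVE state.  If any step fails
 * the controller is removed entirely.
 */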
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                        struct nvme_loop_ctrl, reset_work);
        bool changed;
        int ret;

        nvme_loop_shutdown_ctrl(ctrl);

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                goto out_destroy_admin;

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_destroy_io;

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        nvme_queue_scan(&ctrl->ctrl);
        nvme_queue_async_events(&ctrl->ctrl);

        nvme_start_queues(&ctrl->ctrl);

        return;

out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                return -EBUSY;

        if (!schedule_work(&ctrl->reset_work))
                return -EBUSY;

        flush_work(&ctrl->reset_work);

        return 0;
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
        .is_fabrics             = true,
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
        .reset_ctrl             = nvme_loop_reset_ctrl,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_del_ctrl,
        .get_subsysnqn          = nvmf_get_subsysnqn,
};

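/*
 * Initialise the target-side I/O submission queues, set up the I/O tag set
 * and connect_q, and issue a fabrics Connect on each I/O queue.
 */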
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int ret;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                return ret;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_destroy_queues;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tagset;
        }

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

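/*
 * "connect" entry point for the loop transport: allocate a controller,
 * bring up the admin queue and optional I/O queues, and add it to the
 * global controller list.
 */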
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_loop_ctrl *ctrl;
        bool changed;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
        INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_put_ctrl;

        spin_lock_init(&ctrl->lock);

        ret = -ENOMEM;

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;

        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
                        GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_free_queues;

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->nr_io_queues) {
                ret = nvme_loop_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

        dev_info(ctrl->ctrl.device,
                 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

        kref_get(&ctrl->ctrl.kref);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (opts->nr_io_queues) {
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
        /*
         * XXX: disallow adding more than one port so there are no
         * connection rejections when a subsystem is assigned to a port
         * for which loop doesn't have a pointer.
         * This scenario would be possible if we allowed more than one
         * port to be added and a subsystem was assigned to a port other
         * than nvmet_loop_port.
         */

        nvmet_loop_port = port;
        return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
        if (port == nvmet_loop_port)
                nvmet_loop_port = NULL;
}

static struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
        .delete_ctrl    = nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .create_ctrl    = nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
        int ret;

        ret = nvmet_register_transport(&nvme_loop_ops);
        if (ret)
                return ret;

        ret = nvmf_register_transport(&nvme_loop_transport);
        if (ret)
                nvmet_unregister_transport(&nvme_loop_ops);

        return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
        struct nvme_loop_ctrl *ctrl, *next;

        nvmf_unregister_transport(&nvme_loop_transport);
        nvmet_unregister_transport(&nvme_loop_ops);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
                __nvme_loop_del_ctrl(ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        flush_scheduled_work();
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */