git.kernelconcepts.de - karo-tx-linux.git / drivers/nvme/target/rdma.c
nvmet-rdma: Fix missing dma sync to nvme data structures
1 /*
2  * NVMe over Fabrics RDMA target.
3  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/atomic.h>
16 #include <linux/ctype.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/nvme.h>
22 #include <linux/slab.h>
23 #include <linux/string.h>
24 #include <linux/wait.h>
25 #include <linux/inet.h>
26 #include <asm/unaligned.h>
27
28 #include <rdma/ib_verbs.h>
29 #include <rdma/rdma_cm.h>
30 #include <rdma/rw.h>
31
32 #include <linux/nvme-rdma.h>
33 #include "nvmet.h"
34
35 /*
36  * We allow up to a page of inline data to go with the SQE
37  */
38 #define NVMET_RDMA_INLINE_DATA_SIZE     PAGE_SIZE
39
40 struct nvmet_rdma_cmd {
41         struct ib_sge           sge[2];
42         struct ib_cqe           cqe;
43         struct ib_recv_wr       wr;
44         struct scatterlist      inline_sg;
45         struct page             *inline_page;
46         struct nvme_command     *nvme_cmd;
47         struct nvmet_rdma_queue *queue;
48 };
49
50 enum {
51         NVMET_RDMA_REQ_INLINE_DATA      = (1 << 0),
52         NVMET_RDMA_REQ_INVALIDATE_RKEY  = (1 << 1),
53 };
54
55 struct nvmet_rdma_rsp {
56         struct ib_sge           send_sge;
57         struct ib_cqe           send_cqe;
58         struct ib_send_wr       send_wr;
59
60         struct nvmet_rdma_cmd   *cmd;
61         struct nvmet_rdma_queue *queue;
62
63         struct ib_cqe           read_cqe;
64         struct rdma_rw_ctx      rw;
65
66         struct nvmet_req        req;
67
68         u8                      n_rdma;
69         u32                     flags;
70         u32                     invalidate_rkey;
71
72         struct list_head        wait_list;
73         struct list_head        free_list;
74 };
75
76 enum nvmet_rdma_queue_state {
77         NVMET_RDMA_Q_CONNECTING,
78         NVMET_RDMA_Q_LIVE,
79         NVMET_RDMA_Q_DISCONNECTING,
80         NVMET_RDMA_IN_DEVICE_REMOVAL,
81 };
82
83 struct nvmet_rdma_queue {
84         struct rdma_cm_id       *cm_id;
85         struct nvmet_port       *port;
86         struct ib_cq            *cq;
87         atomic_t                sq_wr_avail;
88         struct nvmet_rdma_device *dev;
89         spinlock_t              state_lock;
90         enum nvmet_rdma_queue_state state;
91         struct nvmet_cq         nvme_cq;
92         struct nvmet_sq         nvme_sq;
93
94         struct nvmet_rdma_rsp   *rsps;
95         struct list_head        free_rsps;
96         spinlock_t              rsps_lock;
97         struct nvmet_rdma_cmd   *cmds;
98
99         struct work_struct      release_work;
100         struct list_head        rsp_wait_list;
101         struct list_head        rsp_wr_wait_list;
102         spinlock_t              rsp_wr_wait_lock;
103
104         int                     idx;
105         int                     host_qid;
106         int                     recv_queue_size;
107         int                     send_queue_size;
108
109         struct list_head        queue_list;
110 };
111
112 struct nvmet_rdma_device {
113         struct ib_device        *device;
114         struct ib_pd            *pd;
115         struct ib_srq           *srq;
116         struct nvmet_rdma_cmd   *srq_cmds;
117         size_t                  srq_size;
118         struct kref             ref;
119         struct list_head        entry;
120 };
121
122 static bool nvmet_rdma_use_srq;
123 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
124 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
125
126 static DEFINE_IDA(nvmet_rdma_queue_ida);
127 static LIST_HEAD(nvmet_rdma_queue_list);
128 static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
129
130 static LIST_HEAD(device_list);
131 static DEFINE_MUTEX(device_list_mutex);
132
133 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
134 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
135 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
136 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
137 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
138 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
139
140 static struct nvmet_fabrics_ops nvmet_rdma_ops;
141
142 /* XXX: really should move to a generic header sooner or later.. */
143 static inline u32 get_unaligned_le24(const u8 *p)
144 {
145         return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
146 }
147
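/*
 * "Data in" means the target still has to pull write data from the host with
 * an RDMA READ; "data out" means it has to push read data back with an RDMA
 * WRITE, which is only done when the command completed successfully.
 * In-capsule (inline) data needs neither.
 */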
148 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
149 {
150         return nvme_is_write(rsp->req.cmd) &&
151                 rsp->req.data_len &&
152                 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
153 }
154
155 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
156 {
157         return !nvme_is_write(rsp->req.cmd) &&
158                 rsp->req.data_len &&
159                 !rsp->req.rsp->status &&
160                 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
161 }
162
163 static inline struct nvmet_rdma_rsp *
164 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
165 {
166         struct nvmet_rdma_rsp *rsp;
167         unsigned long flags;
168
169         spin_lock_irqsave(&queue->rsps_lock, flags);
170         rsp = list_first_entry(&queue->free_rsps,
171                                 struct nvmet_rdma_rsp, free_list);
172         list_del(&rsp->free_list);
173         spin_unlock_irqrestore(&queue->rsps_lock, flags);
174
175         return rsp;
176 }
177
178 static inline void
179 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
180 {
181         unsigned long flags;
182
183         spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
184         list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
185         spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
186 }
187
188 static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
189 {
190         struct scatterlist *sg;
191         int count;
192
193         if (!sgl || !nents)
194                 return;
195
196         for_each_sg(sgl, sg, nents, count)
197                 __free_page(sg_page(sg));
198         kfree(sgl);
199 }
200
201 static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
202                 u32 length)
203 {
204         struct scatterlist *sg;
205         struct page *page;
206         unsigned int nent;
207         int i = 0;
208
209         nent = DIV_ROUND_UP(length, PAGE_SIZE);
210         sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
211         if (!sg)
212                 goto out;
213
214         sg_init_table(sg, nent);
215
216         while (length) {
217                 u32 page_len = min_t(u32, length, PAGE_SIZE);
218
219                 page = alloc_page(GFP_KERNEL);
220                 if (!page)
221                         goto out_free_pages;
222
223                 sg_set_page(&sg[i], page, page_len, 0);
224                 length -= page_len;
225                 i++;
226         }
227         *sgl = sg;
228         *nents = nent;
229         return 0;
230
231 out_free_pages:
232         while (i > 0) {
233                 i--;
234                 __free_page(sg_page(&sg[i]));
235         }
236         kfree(sg);
237 out:
238         return NVME_SC_INTERNAL;
239 }
240
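/*
 * Set up one RDMA RECV command buffer: sge[0] always maps the 64-byte NVMe
 * command, and for non-admin queues sge[1] additionally maps a page that
 * receives any in-capsule (inline) write data sent along with it.  Both are
 * mapped DMA_FROM_DEVICE because the host delivers them via RDMA SEND.
 */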
241 static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
242                         struct nvmet_rdma_cmd *c, bool admin)
243 {
244         /* NVMe command / RDMA RECV */
245         c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
246         if (!c->nvme_cmd)
247                 goto out;
248
249         c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
250                         sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
251         if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
252                 goto out_free_cmd;
253
254         c->sge[0].length = sizeof(*c->nvme_cmd);
255         c->sge[0].lkey = ndev->pd->local_dma_lkey;
256
257         if (!admin) {
258                 c->inline_page = alloc_pages(GFP_KERNEL,
259                                 get_order(NVMET_RDMA_INLINE_DATA_SIZE));
260                 if (!c->inline_page)
261                         goto out_unmap_cmd;
262                 c->sge[1].addr = ib_dma_map_page(ndev->device,
263                                 c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
264                                 DMA_FROM_DEVICE);
265                 if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
266                         goto out_free_inline_page;
267                 c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
268                 c->sge[1].lkey = ndev->pd->local_dma_lkey;
269         }
270
271         c->cqe.done = nvmet_rdma_recv_done;
272
273         c->wr.wr_cqe = &c->cqe;
274         c->wr.sg_list = c->sge;
275         c->wr.num_sge = admin ? 1 : 2;
276
277         return 0;
278
279 out_free_inline_page:
280         if (!admin) {
281                 __free_pages(c->inline_page,
282                                 get_order(NVMET_RDMA_INLINE_DATA_SIZE));
283         }
284 out_unmap_cmd:
285         ib_dma_unmap_single(ndev->device, c->sge[0].addr,
286                         sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
287 out_free_cmd:
288         kfree(c->nvme_cmd);
289
290 out:
291         return -ENOMEM;
292 }
293
294 static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
295                 struct nvmet_rdma_cmd *c, bool admin)
296 {
297         if (!admin) {
298                 ib_dma_unmap_page(ndev->device, c->sge[1].addr,
299                                 NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
300                 __free_pages(c->inline_page,
301                                 get_order(NVMET_RDMA_INLINE_DATA_SIZE));
302         }
303         ib_dma_unmap_single(ndev->device, c->sge[0].addr,
304                                 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
305         kfree(c->nvme_cmd);
306 }
307
308 static struct nvmet_rdma_cmd *
309 nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
310                 int nr_cmds, bool admin)
311 {
312         struct nvmet_rdma_cmd *cmds;
313         int ret = -EINVAL, i;
314
315         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
316         if (!cmds)
317                 goto out;
318
319         for (i = 0; i < nr_cmds; i++) {
320                 ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
321                 if (ret)
322                         goto out_free;
323         }
324
325         return cmds;
326
327 out_free:
328         while (--i >= 0)
329                 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
330         kfree(cmds);
331 out:
332         return ERR_PTR(ret);
333 }
334
335 static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
336                 struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
337 {
338         int i;
339
340         for (i = 0; i < nr_cmds; i++)
341                 nvmet_rdma_free_cmd(ndev, cmds + i, admin);
342         kfree(cmds);
343 }
344
345 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
346                 struct nvmet_rdma_rsp *r)
347 {
348         /* NVMe CQE / RDMA SEND */
349         r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
350         if (!r->req.rsp)
351                 goto out;
352
353         r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
354                         sizeof(*r->req.rsp), DMA_TO_DEVICE);
355         if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
356                 goto out_free_rsp;
357
358         r->send_sge.length = sizeof(*r->req.rsp);
359         r->send_sge.lkey = ndev->pd->local_dma_lkey;
360
361         r->send_cqe.done = nvmet_rdma_send_done;
362
363         r->send_wr.wr_cqe = &r->send_cqe;
364         r->send_wr.sg_list = &r->send_sge;
365         r->send_wr.num_sge = 1;
366         r->send_wr.send_flags = IB_SEND_SIGNALED;
367
368         /* Data In / RDMA READ */
369         r->read_cqe.done = nvmet_rdma_read_data_done;
370         return 0;
371
372 out_free_rsp:
373         kfree(r->req.rsp);
374 out:
375         return -ENOMEM;
376 }
377
378 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
379                 struct nvmet_rdma_rsp *r)
380 {
381         ib_dma_unmap_single(ndev->device, r->send_sge.addr,
382                                 sizeof(*r->req.rsp), DMA_TO_DEVICE);
383         kfree(r->req.rsp);
384 }
385
386 static int
387 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
388 {
389         struct nvmet_rdma_device *ndev = queue->dev;
390         int nr_rsps = queue->recv_queue_size * 2;
391         int ret = -EINVAL, i;
392
393         queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
394                         GFP_KERNEL);
395         if (!queue->rsps)
396                 goto out;
397
398         for (i = 0; i < nr_rsps; i++) {
399                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
400
401                 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
402                 if (ret)
403                         goto out_free;
404
405                 list_add_tail(&rsp->free_list, &queue->free_rsps);
406         }
407
408         return 0;
409
410 out_free:
411         while (--i >= 0) {
412                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
413
414                 list_del(&rsp->free_list);
415                 nvmet_rdma_free_rsp(ndev, rsp);
416         }
417         kfree(queue->rsps);
418 out:
419         return ret;
420 }
421
422 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
423 {
424         struct nvmet_rdma_device *ndev = queue->dev;
425         int i, nr_rsps = queue->recv_queue_size * 2;
426
427         for (i = 0; i < nr_rsps; i++) {
428                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
429
430                 list_del(&rsp->free_list);
431                 nvmet_rdma_free_rsp(ndev, rsp);
432         }
433         kfree(queue->rsps);
434 }
435
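/*
 * Hand ownership of the command buffer back to the device before reposting
 * the RECV, so the HCA never sees stale CPU cache contents on non-coherent
 * platforms.  The matching ib_dma_sync_single_for_cpu() is done in
 * nvmet_rdma_handle_command() before the command is parsed.
 */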
436 static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
437                 struct nvmet_rdma_cmd *cmd)
438 {
439         struct ib_recv_wr *bad_wr;
440
441         ib_dma_sync_single_for_device(ndev->device,
442                 cmd->sge[0].addr, cmd->sge[0].length,
443                 DMA_FROM_DEVICE);
444
445         if (ndev->srq)
446                 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
447         return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
448 }
449
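/*
 * Retry commands that were deferred because the send queue had no free work
 * request slots.  Invoked from nvmet_rdma_release_rsp() once a completed
 * response has returned its SEND + RDMA credits to sq_wr_avail.
 */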
450 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
451 {
452         spin_lock(&queue->rsp_wr_wait_lock);
453         while (!list_empty(&queue->rsp_wr_wait_list)) {
454                 struct nvmet_rdma_rsp *rsp;
455                 bool ret;
456
457                 rsp = list_entry(queue->rsp_wr_wait_list.next,
458                                 struct nvmet_rdma_rsp, wait_list);
459                 list_del(&rsp->wait_list);
460
461                 spin_unlock(&queue->rsp_wr_wait_lock);
462                 ret = nvmet_rdma_execute_command(rsp);
463                 spin_lock(&queue->rsp_wr_wait_lock);
464
465                 if (!ret) {
466                         list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
467                         break;
468                 }
469         }
470         spin_unlock(&queue->rsp_wr_wait_lock);
471 }
472
473
474 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
475 {
476         struct nvmet_rdma_queue *queue = rsp->queue;
477
478         atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
479
480         if (rsp->n_rdma) {
481                 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
482                                 queue->cm_id->port_num, rsp->req.sg,
483                                 rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
484         }
485
486         if (rsp->req.sg != &rsp->cmd->inline_sg)
487                 nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
488
489         if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
490                 nvmet_rdma_process_wr_wait_list(queue);
491
492         nvmet_rdma_put_rsp(rsp);
493 }
494
495 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
496 {
497         if (queue->nvme_sq.ctrl) {
498                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
499         } else {
500                 /*
501                  * We didn't set up the controller yet in case
502                  * of an admin connect error, so just disconnect
503                  * and clean up the queue.
504                  */
505                 nvmet_rdma_queue_disconnect(queue);
506         }
507 }
508
509 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
510 {
511         struct nvmet_rdma_rsp *rsp =
512                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
513
514         nvmet_rdma_release_rsp(rsp);
515
516         if (unlikely(wc->status != IB_WC_SUCCESS &&
517                      wc->status != IB_WC_WR_FLUSH_ERR)) {
518                 pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
519                         wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
520                 nvmet_rdma_error_comp(rsp->queue);
521         }
522 }
523
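/*
 * Queue the NVMe completion back to the host: use SEND_WITH_INV when the
 * host asked for remote invalidation of its rkey, chain any RDMA WRITE work
 * requests for read data in front of the SEND via rdma_rw_ctx_wrs(), repost
 * the RECV buffer, and sync the CQE for the device before posting.
 */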
524 static void nvmet_rdma_queue_response(struct nvmet_req *req)
525 {
526         struct nvmet_rdma_rsp *rsp =
527                 container_of(req, struct nvmet_rdma_rsp, req);
528         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
529         struct ib_send_wr *first_wr, *bad_wr;
530
531         if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
532                 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
533                 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
534         } else {
535                 rsp->send_wr.opcode = IB_WR_SEND;
536         }
537
538         if (nvmet_rdma_need_data_out(rsp))
539                 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
540                                 cm_id->port_num, NULL, &rsp->send_wr);
541         else
542                 first_wr = &rsp->send_wr;
543
544         nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
545
546         ib_dma_sync_single_for_device(rsp->queue->dev->device,
547                 rsp->send_sge.addr, rsp->send_sge.length,
548                 DMA_TO_DEVICE);
549
550         if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
551                 pr_err("sending cmd response failed\n");
552                 nvmet_rdma_release_rsp(rsp);
553         }
554 }
555
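/*
 * Completion of the RDMA READ that pulled the host's write data into our
 * scatterlist.  Return the work request credits and release the rw context,
 * then execute the command; any error other than a flush tears the queue
 * down via nvmet_rdma_error_comp().
 */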
556 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
557 {
558         struct nvmet_rdma_rsp *rsp =
559                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
560         struct nvmet_rdma_queue *queue = cq->cq_context;
561
562         WARN_ON(rsp->n_rdma <= 0);
563         atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
564         rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
565                         queue->cm_id->port_num, rsp->req.sg,
566                         rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
567         rsp->n_rdma = 0;
568
569         if (unlikely(wc->status != IB_WC_SUCCESS)) {
570                 nvmet_rdma_release_rsp(rsp);
571                 if (wc->status != IB_WC_WR_FLUSH_ERR) {
572                         pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
573                                 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
574                         nvmet_rdma_error_comp(queue);
575                 }
576                 return;
577         }
578
579         rsp->req.execute(&rsp->req);
580 }
581
582 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
583                 u64 off)
584 {
585         sg_init_table(&rsp->cmd->inline_sg, 1);
586         sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
587         rsp->req.sg = &rsp->cmd->inline_sg;
588         rsp->req.sg_cnt = 1;
589 }
590
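/*
 * In-capsule data: the SGL descriptor carries an offset into, and a length
 * within, the inline page received together with the command.  Only host
 * writes may carry inline data, and offset + length must fit within
 * NVMET_RDMA_INLINE_DATA_SIZE.
 */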
591 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
592 {
593         struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
594         u64 off = le64_to_cpu(sgl->addr);
595         u32 len = le32_to_cpu(sgl->length);
596
597         if (!nvme_is_write(rsp->req.cmd))
598                 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
599
600         if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
601                 pr_err("invalid inline data offset!\n");
602                 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
603         }
604
605         /* no data command? */
606         if (!len)
607                 return 0;
608
609         nvmet_rdma_use_inline_sg(rsp, len, off);
610         rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
611         return 0;
612 }
613
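/*
 * Keyed SGL: addr, key and a 24-bit length describe a region of host memory.
 * Allocate a local scatterlist and let rdma_rw_ctx_init() build the RDMA
 * READ/WRITE work requests for it; the number of work requests is accounted
 * in n_rdma.  If requested, remember the rkey so the final SEND can remotely
 * invalidate it.
 */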
614 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
615                 struct nvme_keyed_sgl_desc *sgl, bool invalidate)
616 {
617         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
618         u64 addr = le64_to_cpu(sgl->addr);
619         u32 len = get_unaligned_le24(sgl->length);
620         u32 key = get_unaligned_le32(sgl->key);
621         int ret;
622         u16 status;
623
624         /* no data command? */
625         if (!len)
626                 return 0;
627
628         status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
629                         len);
630         if (status)
631                 return status;
632
633         ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
634                         rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
635                         nvmet_data_dir(&rsp->req));
636         if (ret < 0)
637                 return NVME_SC_INTERNAL;
638         rsp->n_rdma += ret;
639
640         if (invalidate) {
641                 rsp->invalidate_rkey = key;
642                 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
643         }
644
645         return 0;
646 }
647
648 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
649 {
650         struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
651
652         switch (sgl->type >> 4) {
653         case NVME_SGL_FMT_DATA_DESC:
654                 switch (sgl->type & 0xf) {
655                 case NVME_SGL_FMT_OFFSET:
656                         return nvmet_rdma_map_sgl_inline(rsp);
657                 default:
658                         pr_err("invalid SGL subtype: %#x\n", sgl->type);
659                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
660                 }
661         case NVME_KEY_SGL_FMT_DATA_DESC:
662                 switch (sgl->type & 0xf) {
663                 case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
664                         return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
665                 case NVME_SGL_FMT_ADDRESS:
666                         return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
667                 default:
668                         pr_err("invalid SGL subtype: %#x\n", sgl->type);
669                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
670                 }
671         default:
672                 pr_err("invalid SGL type: %#x\n", sgl->type);
673                 return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
674         }
675 }
676
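/*
 * Each command needs one SEND plus n_rdma RDMA work requests.  If reserving
 * them would overcommit the send queue, give the credits back and return
 * false so the caller parks the request on rsp_wr_wait_list.  Commands that
 * need host data first post the RDMA READ and execute later from
 * nvmet_rdma_read_data_done(); everything else executes immediately.
 */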
677 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
678 {
679         struct nvmet_rdma_queue *queue = rsp->queue;
680
681         if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
682                         &queue->sq_wr_avail) < 0)) {
683                 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
684                                 1 + rsp->n_rdma, queue->idx,
685                                 queue->nvme_sq.ctrl->cntlid);
686                 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
687                 return false;
688         }
689
690         if (nvmet_rdma_need_data_in(rsp)) {
691                 if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
692                                 queue->cm_id->port_num, &rsp->read_cqe, NULL))
693                         nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
694         } else {
695                 rsp->req.execute(&rsp->req);
696         }
697
698         return true;
699 }
700
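/*
 * Take CPU ownership of both DMA buffers before touching them: the received
 * command is about to be parsed and the CQE is about to be written.  The
 * matching ib_dma_sync_single_for_device() calls live in
 * nvmet_rdma_post_recv() and nvmet_rdma_queue_response(); this pairing is
 * what the "missing dma sync" fix adds.
 */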
701 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
702                 struct nvmet_rdma_rsp *cmd)
703 {
704         u16 status;
705
706         cmd->queue = queue;
707         cmd->n_rdma = 0;
708         cmd->req.port = queue->port;
709
710
711         ib_dma_sync_single_for_cpu(queue->dev->device,
712                 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713                 DMA_FROM_DEVICE);
714         ib_dma_sync_single_for_cpu(queue->dev->device,
715                 cmd->send_sge.addr, cmd->send_sge.length,
716                 DMA_TO_DEVICE);
717
718         if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
719                         &queue->nvme_sq, &nvmet_rdma_ops))
720                 return;
721
722         status = nvmet_rdma_map_sgl(cmd);
723         if (status)
724                 goto out_err;
725
726         if (unlikely(!nvmet_rdma_execute_command(cmd))) {
727                 spin_lock(&queue->rsp_wr_wait_lock);
728                 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
729                 spin_unlock(&queue->rsp_wr_wait_lock);
730         }
731
732         return;
733
734 out_err:
735         nvmet_req_complete(&cmd->req, status);
736 }
737
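/*
 * RECV completion: after checking the status and that at least a full
 * 64-byte command arrived, pair the command buffer with a free response and
 * handle it.  Commands racing with connection establishment are parked on
 * rsp_wait_list and replayed from nvmet_rdma_queue_established().
 */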
738 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
739 {
740         struct nvmet_rdma_cmd *cmd =
741                 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
742         struct nvmet_rdma_queue *queue = cq->cq_context;
743         struct nvmet_rdma_rsp *rsp;
744
745         if (unlikely(wc->status != IB_WC_SUCCESS)) {
746                 if (wc->status != IB_WC_WR_FLUSH_ERR) {
747                         pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
748                                 wc->wr_cqe, ib_wc_status_msg(wc->status),
749                                 wc->status);
750                         nvmet_rdma_error_comp(queue);
751                 }
752                 return;
753         }
754
755         if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
756                 pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
757                 nvmet_rdma_error_comp(queue);
758                 return;
759         }
760
761         cmd->queue = queue;
762         rsp = nvmet_rdma_get_rsp(queue);
763         rsp->cmd = cmd;
764         rsp->flags = 0;
765         rsp->req.cmd = cmd->nvme_cmd;
766
767         if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
768                 unsigned long flags;
769
770                 spin_lock_irqsave(&queue->state_lock, flags);
771                 if (queue->state == NVMET_RDMA_Q_CONNECTING)
772                         list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
773                 else
774                         nvmet_rdma_put_rsp(rsp);
775                 spin_unlock_irqrestore(&queue->state_lock, flags);
776                 return;
777         }
778
779         nvmet_rdma_handle_command(queue, rsp);
780 }
781
782 static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
783 {
784         if (!ndev->srq)
785                 return;
786
787         nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
788         ib_destroy_srq(ndev->srq);
789 }
790
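/*
 * Optional shared receive queue: instead of per-queue RECV buffers, allocate
 * one pool of srq_size command buffers shared by every queue on this device.
 * If the HCA does not support SRQs we fall back to per-queue RECVs.
 */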
791 static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
792 {
793         struct ib_srq_init_attr srq_attr = { NULL, };
794         struct ib_srq *srq;
795         size_t srq_size;
796         int ret, i;
797
798         srq_size = 4095;        /* XXX: tune */
799
800         srq_attr.attr.max_wr = srq_size;
801         srq_attr.attr.max_sge = 2;
802         srq_attr.attr.srq_limit = 0;
803         srq_attr.srq_type = IB_SRQT_BASIC;
804         srq = ib_create_srq(ndev->pd, &srq_attr);
805         if (IS_ERR(srq)) {
806                 /*
807                  * If SRQs aren't supported we just go ahead and use normal
808                  * non-shared receive queues.
809                  */
810                 pr_info("SRQ requested but not supported.\n");
811                 return 0;
812         }
813
814         ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
815         if (IS_ERR(ndev->srq_cmds)) {
816                 ret = PTR_ERR(ndev->srq_cmds);
817                 goto out_destroy_srq;
818         }
819
820         ndev->srq = srq;
821         ndev->srq_size = srq_size;
822
823         for (i = 0; i < srq_size; i++)
824                 nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
825
826         return 0;
827
828 out_destroy_srq:
829         ib_destroy_srq(srq);
830         return ret;
831 }
832
833 static void nvmet_rdma_free_dev(struct kref *ref)
834 {
835         struct nvmet_rdma_device *ndev =
836                 container_of(ref, struct nvmet_rdma_device, ref);
837
838         mutex_lock(&device_list_mutex);
839         list_del(&ndev->entry);
840         mutex_unlock(&device_list_mutex);
841
842         nvmet_rdma_destroy_srq(ndev);
843         ib_dealloc_pd(ndev->pd);
844
845         kfree(ndev);
846 }
847
848 static struct nvmet_rdma_device *
849 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
850 {
851         struct nvmet_rdma_device *ndev;
852         int ret;
853
854         mutex_lock(&device_list_mutex);
855         list_for_each_entry(ndev, &device_list, entry) {
856                 if (ndev->device->node_guid == cm_id->device->node_guid &&
857                     kref_get_unless_zero(&ndev->ref))
858                         goto out_unlock;
859         }
860
861         ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
862         if (!ndev)
863                 goto out_err;
864
865         ndev->device = cm_id->device;
866         kref_init(&ndev->ref);
867
868         ndev->pd = ib_alloc_pd(ndev->device, 0);
869         if (IS_ERR(ndev->pd))
870                 goto out_free_dev;
871
872         if (nvmet_rdma_use_srq) {
873                 ret = nvmet_rdma_init_srq(ndev);
874                 if (ret)
875                         goto out_free_pd;
876         }
877
878         list_add(&ndev->entry, &device_list);
879 out_unlock:
880         mutex_unlock(&device_list_mutex);
881         pr_debug("added %s.\n", ndev->device->name);
882         return ndev;
883
884 out_free_pd:
885         ib_dealloc_pd(ndev->pd);
886 out_free_dev:
887         kfree(ndev);
888 out_err:
889         mutex_unlock(&device_list_mutex);
890         return NULL;
891 }
892
893 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
894 {
895         struct ib_qp_init_attr qp_attr;
896         struct nvmet_rdma_device *ndev = queue->dev;
897         int comp_vector, nr_cqe, ret, i;
898
899         /*
900          * Spread the io queues across completion vectors,
901          * but still keep all admin queues on vector 0.
902          */
903         comp_vector = !queue->host_qid ? 0 :
904                 queue->idx % ndev->device->num_comp_vectors;
905
906         /*
907          * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
908          */
909         nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
910
911         queue->cq = ib_alloc_cq(ndev->device, queue,
912                         nr_cqe + 1, comp_vector,
913                         IB_POLL_WORKQUEUE);
914         if (IS_ERR(queue->cq)) {
915                 ret = PTR_ERR(queue->cq);
916                 pr_err("failed to create CQ cqe= %d ret= %d\n",
917                        nr_cqe + 1, ret);
918                 goto out;
919         }
920
921         memset(&qp_attr, 0, sizeof(qp_attr));
922         qp_attr.qp_context = queue;
923         qp_attr.event_handler = nvmet_rdma_qp_event;
924         qp_attr.send_cq = queue->cq;
925         qp_attr.recv_cq = queue->cq;
926         qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
927         qp_attr.qp_type = IB_QPT_RC;
928         /* +1 for drain */
929         qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
930         qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
931         qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
932                                         ndev->device->attrs.max_sge);
933
934         if (ndev->srq) {
935                 qp_attr.srq = ndev->srq;
936         } else {
937                 /* +1 for drain */
938                 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
939                 qp_attr.cap.max_recv_sge = 2;
940         }
941
942         ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
943         if (ret) {
944                 pr_err("failed to create_qp ret= %d\n", ret);
945                 goto err_destroy_cq;
946         }
947
948         atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
949
950         pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
951                  __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
952                  qp_attr.cap.max_send_wr, queue->cm_id);
953
954         if (!ndev->srq) {
955                 for (i = 0; i < queue->recv_queue_size; i++) {
956                         queue->cmds[i].queue = queue;
957                         nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
958                 }
959         }
960
961 out:
962         return ret;
963
964 err_destroy_cq:
965         ib_free_cq(queue->cq);
966         goto out;
967 }
968
969 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
970 {
971         ib_drain_qp(queue->cm_id->qp);
972         rdma_destroy_qp(queue->cm_id);
973         ib_free_cq(queue->cq);
974 }
975
976 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
977 {
978         pr_info("freeing queue %d\n", queue->idx);
979
980         nvmet_sq_destroy(&queue->nvme_sq);
981
982         nvmet_rdma_destroy_queue_ib(queue);
983         if (!queue->dev->srq) {
984                 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
985                                 queue->recv_queue_size,
986                                 !queue->host_qid);
987         }
988         nvmet_rdma_free_rsps(queue);
989         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
990         kfree(queue);
991 }
992
993 static void nvmet_rdma_release_queue_work(struct work_struct *w)
994 {
995         struct nvmet_rdma_queue *queue =
996                 container_of(w, struct nvmet_rdma_queue, release_work);
997         struct rdma_cm_id *cm_id = queue->cm_id;
998         struct nvmet_rdma_device *dev = queue->dev;
999         enum nvmet_rdma_queue_state state = queue->state;
1000
1001         nvmet_rdma_free_queue(queue);
1002
1003         if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
1004                 rdma_destroy_id(cm_id);
1005
1006         kref_put(&dev->ref, nvmet_rdma_free_dev);
1007 }
1008
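/*
 * Decode the NVMe/RDMA CM connect request private data (recfmt, qid,
 * hsqsize, hrqsize) into our queue sizes.  hsqsize is a 0's based value,
 * hence the +1 on the receive queue size; hrqsize is used as-is.
 */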
1009 static int
1010 nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
1011                                 struct nvmet_rdma_queue *queue)
1012 {
1013         struct nvme_rdma_cm_req *req;
1014
1015         req = (struct nvme_rdma_cm_req *)conn->private_data;
1016         if (!req || conn->private_data_len == 0)
1017                 return NVME_RDMA_CM_INVALID_LEN;
1018
1019         if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
1020                 return NVME_RDMA_CM_INVALID_RECFMT;
1021
1022         queue->host_qid = le16_to_cpu(req->qid);
1023
1024         /*
1025          * req->hsqsize corresponds to our recv queue size plus 1
1026          * req->hrqsize corresponds to our send queue size
1027          */
1028         queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
1029         queue->send_queue_size = le16_to_cpu(req->hrqsize);
1030
1031         if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
1032                 return NVME_RDMA_CM_INVALID_HSQSIZE;
1033
1034         /* XXX: Should we enforce some kind of max for IO queues? */
1035
1036         return 0;
1037 }
1038
1039 static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
1040                                 enum nvme_rdma_cm_status status)
1041 {
1042         struct nvme_rdma_cm_rej rej;
1043
1044         rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1045         rej.sts = cpu_to_le16(status);
1046
1047         return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
1048 }
1049
1050 static struct nvmet_rdma_queue *
1051 nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1052                 struct rdma_cm_id *cm_id,
1053                 struct rdma_cm_event *event)
1054 {
1055         struct nvmet_rdma_queue *queue;
1056         int ret;
1057
1058         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1059         if (!queue) {
1060                 ret = NVME_RDMA_CM_NO_RSC;
1061                 goto out_reject;
1062         }
1063
1064         ret = nvmet_sq_init(&queue->nvme_sq);
1065         if (ret) {
1066                 ret = NVME_RDMA_CM_NO_RSC;
1067                 goto out_free_queue;
1068         }
1069
1070         ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
1071         if (ret)
1072                 goto out_destroy_sq;
1073
1074         /*
1075          * Schedules the actual release because calling rdma_destroy_id from
1076          * inside a CM callback would trigger a deadlock. (great API design..)
1077          */
1078         INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
1079         queue->dev = ndev;
1080         queue->cm_id = cm_id;
1081
1082         spin_lock_init(&queue->state_lock);
1083         queue->state = NVMET_RDMA_Q_CONNECTING;
1084         INIT_LIST_HEAD(&queue->rsp_wait_list);
1085         INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
1086         spin_lock_init(&queue->rsp_wr_wait_lock);
1087         INIT_LIST_HEAD(&queue->free_rsps);
1088         spin_lock_init(&queue->rsps_lock);
1089         INIT_LIST_HEAD(&queue->queue_list);
1090
1091         queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1092         if (queue->idx < 0) {
1093                 ret = NVME_RDMA_CM_NO_RSC;
1094                 goto out_free_queue;
1095         }
1096
1097         ret = nvmet_rdma_alloc_rsps(queue);
1098         if (ret) {
1099                 ret = NVME_RDMA_CM_NO_RSC;
1100                 goto out_ida_remove;
1101         }
1102
1103         if (!ndev->srq) {
1104                 queue->cmds = nvmet_rdma_alloc_cmds(ndev,
1105                                 queue->recv_queue_size,
1106                                 !queue->host_qid);
1107                 if (IS_ERR(queue->cmds)) {
1108                         ret = NVME_RDMA_CM_NO_RSC;
1109                         goto out_free_responses;
1110                 }
1111         }
1112
1113         ret = nvmet_rdma_create_queue_ib(queue);
1114         if (ret) {
1115                 pr_err("%s: creating RDMA queue failed (%d).\n",
1116                         __func__, ret);
1117                 ret = NVME_RDMA_CM_NO_RSC;
1118                 goto out_free_cmds;
1119         }
1120
1121         return queue;
1122
1123 out_free_cmds:
1124         if (!ndev->srq) {
1125                 nvmet_rdma_free_cmds(queue->dev, queue->cmds,
1126                                 queue->recv_queue_size,
1127                                 !queue->host_qid);
1128         }
1129 out_free_responses:
1130         nvmet_rdma_free_rsps(queue);
1131 out_ida_remove:
1132         ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
1133 out_destroy_sq:
1134         nvmet_sq_destroy(&queue->nvme_sq);
1135 out_free_queue:
1136         kfree(queue);
1137 out_reject:
1138         pr_debug("rejecting connect request with status code %d\n", ret);
1139         nvmet_rdma_cm_reject(cm_id, ret);
1140         return NULL;
1141 }
1142
1143 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
1144 {
1145         struct nvmet_rdma_queue *queue = priv;
1146
1147         switch (event->event) {
1148         case IB_EVENT_COMM_EST:
1149                 rdma_notify(queue->cm_id, event->event);
1150                 break;
1151         default:
1152                 pr_err("received IB QP event: %s (%d)\n",
1153                        ib_event_msg(event->event), event->event);
1154                 break;
1155         }
1156 }
1157
1158 static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
1159                 struct nvmet_rdma_queue *queue,
1160                 struct rdma_conn_param *p)
1161 {
1162         struct rdma_conn_param  param = { };
1163         struct nvme_rdma_cm_rep priv = { };
1164         int ret = -ENOMEM;
1165
1166         param.rnr_retry_count = 7;
1167         param.flow_control = 1;
1168         param.initiator_depth = min_t(u8, p->initiator_depth,
1169                 queue->dev->device->attrs.max_qp_init_rd_atom);
1170         param.private_data = &priv;
1171         param.private_data_len = sizeof(priv);
1172         priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1173         priv.crqsize = cpu_to_le16(queue->recv_queue_size);
1174
1175         ret = rdma_accept(cm_id, &param);
1176         if (ret)
1177                 pr_err("rdma_accept failed (error code = %d)\n", ret);
1178
1179         return ret;
1180 }
1181
1182 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
1183                 struct rdma_cm_event *event)
1184 {
1185         struct nvmet_rdma_device *ndev;
1186         struct nvmet_rdma_queue *queue;
1187         int ret = -EINVAL;
1188
1189         ndev = nvmet_rdma_find_get_device(cm_id);
1190         if (!ndev) {
1191                 pr_err("no client data!\n");
1192                 nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
1193                 return -ECONNREFUSED;
1194         }
1195
1196         queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
1197         if (!queue) {
1198                 ret = -ENOMEM;
1199                 goto put_device;
1200         }
1201         queue->port = cm_id->context;
1202
1203         ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
1204         if (ret)
1205                 goto release_queue;
1206
1207         mutex_lock(&nvmet_rdma_queue_mutex);
1208         list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
1209         mutex_unlock(&nvmet_rdma_queue_mutex);
1210
1211         return 0;
1212
1213 release_queue:
1214         nvmet_rdma_free_queue(queue);
1215 put_device:
1216         kref_put(&ndev->ref, nvmet_rdma_free_dev);
1217
1218         return ret;
1219 }
1220
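/*
 * The CM ESTABLISHED event arrived: flip the queue to LIVE and replay any
 * commands that were received while still connecting (parked on
 * rsp_wait_list by nvmet_rdma_recv_done()).
 */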
1221 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
1222 {
1223         unsigned long flags;
1224
1225         spin_lock_irqsave(&queue->state_lock, flags);
1226         if (queue->state != NVMET_RDMA_Q_CONNECTING) {
1227                 pr_warn("trying to establish a connected queue\n");
1228                 goto out_unlock;
1229         }
1230         queue->state = NVMET_RDMA_Q_LIVE;
1231
1232         while (!list_empty(&queue->rsp_wait_list)) {
1233                 struct nvmet_rdma_rsp *cmd;
1234
1235                 cmd = list_first_entry(&queue->rsp_wait_list,
1236                                         struct nvmet_rdma_rsp, wait_list);
1237                 list_del(&cmd->wait_list);
1238
1239                 spin_unlock_irqrestore(&queue->state_lock, flags);
1240                 nvmet_rdma_handle_command(queue, cmd);
1241                 spin_lock_irqsave(&queue->state_lock, flags);
1242         }
1243
1244 out_unlock:
1245         spin_unlock_irqrestore(&queue->state_lock, flags);
1246 }
1247
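/*
 * Queue teardown state machine: CONNECTING and LIVE move to DISCONNECTING
 * and, like IN_DEVICE_REMOVAL, fall through to disconnect the cm_id and
 * schedule the release work.  A queue already in DISCONNECTING is left
 * alone so the release only runs once; the missing break is intentional.
 */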
1248 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1249 {
1250         bool disconnect = false;
1251         unsigned long flags;
1252
1253         pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
1254
1255         spin_lock_irqsave(&queue->state_lock, flags);
1256         switch (queue->state) {
1257         case NVMET_RDMA_Q_CONNECTING:
1258         case NVMET_RDMA_Q_LIVE:
1259                 queue->state = NVMET_RDMA_Q_DISCONNECTING;
1260         case NVMET_RDMA_IN_DEVICE_REMOVAL:
1261                 disconnect = true;
1262                 break;
1263         case NVMET_RDMA_Q_DISCONNECTING:
1264                 break;
1265         }
1266         spin_unlock_irqrestore(&queue->state_lock, flags);
1267
1268         if (disconnect) {
1269                 rdma_disconnect(queue->cm_id);
1270                 schedule_work(&queue->release_work);
1271         }
1272 }
1273
1274 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1275 {
1276         bool disconnect = false;
1277
1278         mutex_lock(&nvmet_rdma_queue_mutex);
1279         if (!list_empty(&queue->queue_list)) {
1280                 list_del_init(&queue->queue_list);
1281                 disconnect = true;
1282         }
1283         mutex_unlock(&nvmet_rdma_queue_mutex);
1284
1285         if (disconnect)
1286                 __nvmet_rdma_queue_disconnect(queue);
1287 }
1288
1289 static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1290                 struct nvmet_rdma_queue *queue)
1291 {
1292         WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1293
1294         mutex_lock(&nvmet_rdma_queue_mutex);
1295         if (!list_empty(&queue->queue_list))
1296                 list_del_init(&queue->queue_list);
1297         mutex_unlock(&nvmet_rdma_queue_mutex);
1298
1299         pr_err("failed to connect queue %d\n", queue->idx);
1300         schedule_work(&queue->release_work);
1301 }
1302
1303 /**
1304  * nvmet_rdma_device_removal() - Handle RDMA device removal
1305  * @cm_id:      rdma_cm id that received the DEVICE_REMOVAL event
1306  * @queue:      nvmet rdma queue (cm_id qp_context), NULL for a listener cm_id
1307  *
1308  * DEVICE_REMOVAL event notifies us that the RDMA device is about
1309  * to unplug so we should take care of destroying our RDMA resources.
1310  * This event will be generated for each allocated cm_id.
1311  *
1312  * Note that this event can be generated on a normal queue cm_id
1313  * and/or a device bound listener cm_id (in which case
1314  * queue will be NULL).
1315  *
1316  * We claim ownership of destroying the cm_id. For queues we move
1317  * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL, and for a port
1318  * we nullify the priv to prevent a double cm_id destruction and destroy
1319  * the cm_id implicitly by returning a non-zero rc to the callout.
1320  */
1321 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
1322                 struct nvmet_rdma_queue *queue)
1323 {
1324         unsigned long flags;
1325
1326         if (!queue) {
1327                 struct nvmet_port *port = cm_id->context;
1328
1329                 /*
1330                  * This is a listener cm_id. Make sure that
1331                  * future remove_port won't invoke a double
1332                  * cm_id destroy. Use atomic xchg to make sure
1333                  * we don't compete with remove_port.
1334                  */
1335                 if (xchg(&port->priv, NULL) != cm_id)
1336                         return 0;
1337         } else {
1338                 /*
1339                  * This is a queue cm_id. Make sure the queue
1340                  * release path will not destroy the cm_id, and
1341                  * schedule removal of all the ctrl's queues (only
1342                  * if the queue is not disconnecting already).
1343                  */
1344                 spin_lock_irqsave(&queue->state_lock, flags);
1345                 if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
1346                         queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
1347                 spin_unlock_irqrestore(&queue->state_lock, flags);
1348                 nvmet_rdma_queue_disconnect(queue);
1349                 flush_scheduled_work();
1350         }
1351
1352         /*
1353          * We need to return 1 so that the core will destroy
1354  * its own ID.  What a great API design..
1355          */
1356         return 1;
1357 }
1358
1359 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1360                 struct rdma_cm_event *event)
1361 {
1362         struct nvmet_rdma_queue *queue = NULL;
1363         int ret = 0;
1364
1365         if (cm_id->qp)
1366                 queue = cm_id->qp->qp_context;
1367
1368         pr_debug("%s (%d): status %d id %p\n",
1369                 rdma_event_msg(event->event), event->event,
1370                 event->status, cm_id);
1371
1372         switch (event->event) {
1373         case RDMA_CM_EVENT_CONNECT_REQUEST:
1374                 ret = nvmet_rdma_queue_connect(cm_id, event);
1375                 break;
1376         case RDMA_CM_EVENT_ESTABLISHED:
1377                 nvmet_rdma_queue_established(queue);
1378                 break;
1379         case RDMA_CM_EVENT_ADDR_CHANGE:
1380         case RDMA_CM_EVENT_DISCONNECTED:
1381         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1382                 /*
1383                  * We might end up here when we already freed the qp
1384                  * which means queue release sequence is in progress,
1385                  * so don't get in the way...
1386                  */
1387                 if (queue)
1388                         nvmet_rdma_queue_disconnect(queue);
1389                 break;
1390         case RDMA_CM_EVENT_DEVICE_REMOVAL:
1391                 ret = nvmet_rdma_device_removal(cm_id, queue);
1392                 break;
1393         case RDMA_CM_EVENT_REJECTED:
1394                 pr_debug("Connection rejected: %s\n",
1395                          rdma_reject_msg(cm_id, event->status));
1396                 /* FALLTHROUGH */
1397         case RDMA_CM_EVENT_UNREACHABLE:
1398         case RDMA_CM_EVENT_CONNECT_ERROR:
1399                 nvmet_rdma_queue_connect_fail(cm_id, queue);
1400                 break;
1401         default:
1402                 pr_err("received unrecognized RDMA CM event %d\n",
1403                         event->event);
1404                 break;
1405         }
1406
1407         return ret;
1408 }
1409
1410 static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
1411 {
1412         struct nvmet_rdma_queue *queue;
1413
1414 restart:
1415         mutex_lock(&nvmet_rdma_queue_mutex);
1416         list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
1417                 if (queue->nvme_sq.ctrl == ctrl) {
1418                         list_del_init(&queue->queue_list);
1419                         mutex_unlock(&nvmet_rdma_queue_mutex);
1420
1421                         __nvmet_rdma_queue_disconnect(queue);
1422                         goto restart;
1423                 }
1424         }
1425         mutex_unlock(&nvmet_rdma_queue_mutex);
1426 }
1427
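/*
 * A port is an rdma_cm listener bound to the configured IPv4 traddr/trsvcid.
 * The listening cm_id is stashed in port->priv so that remove_port and
 * DEVICE_REMOVAL can each tear it down exactly once (they race via xchg()).
 */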
1428 static int nvmet_rdma_add_port(struct nvmet_port *port)
1429 {
1430         struct rdma_cm_id *cm_id;
1431         struct sockaddr_in addr_in;
1432         u16 port_in;
1433         int ret;
1434
1435         switch (port->disc_addr.adrfam) {
1436         case NVMF_ADDR_FAMILY_IP4:
1437                 break;
1438         default:
1439                 pr_err("address family %d not supported\n",
1440                                 port->disc_addr.adrfam);
1441                 return -EINVAL;
1442         }
1443
1444         ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);
1445         if (ret)
1446                 return ret;
1447
1448         addr_in.sin_family = AF_INET;
1449         addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
1450         addr_in.sin_port = htons(port_in);
1451
1452         cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1453                         RDMA_PS_TCP, IB_QPT_RC);
1454         if (IS_ERR(cm_id)) {
1455                 pr_err("CM ID creation failed\n");
1456                 return PTR_ERR(cm_id);
1457         }
1458
1459         ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
1460         if (ret) {
1461                 pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
1462                 goto out_destroy_id;
1463         }
1464
1465         ret = rdma_listen(cm_id, 128);
1466         if (ret) {
1467                 pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
1468                 goto out_destroy_id;
1469         }
1470
1471         pr_info("enabling port %d (%pISpc)\n",
1472                 le16_to_cpu(port->disc_addr.portid), &addr_in);
1473         port->priv = cm_id;
1474         return 0;
1475
1476 out_destroy_id:
1477         rdma_destroy_id(cm_id);
1478         return ret;
1479 }
1480
1481 static void nvmet_rdma_remove_port(struct nvmet_port *port)
1482 {
1483         struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
1484
1485         if (cm_id)
1486                 rdma_destroy_id(cm_id);
1487 }
1488
1489 static struct nvmet_fabrics_ops nvmet_rdma_ops = {
1490         .owner                  = THIS_MODULE,
1491         .type                   = NVMF_TRTYPE_RDMA,
1492         .sqe_inline_size        = NVMET_RDMA_INLINE_DATA_SIZE,
1493         .msdbd                  = 1,
1494         .has_keyed_sgls         = 1,
1495         .add_port               = nvmet_rdma_add_port,
1496         .remove_port            = nvmet_rdma_remove_port,
1497         .queue_response         = nvmet_rdma_queue_response,
1498         .delete_ctrl            = nvmet_rdma_delete_ctrl,
1499 };
1500
1501 static int __init nvmet_rdma_init(void)
1502 {
1503         return nvmet_register_transport(&nvmet_rdma_ops);
1504 }
1505
1506 static void __exit nvmet_rdma_exit(void)
1507 {
1508         struct nvmet_rdma_queue *queue;
1509
1510         nvmet_unregister_transport(&nvmet_rdma_ops);
1511
1512         flush_scheduled_work();
1513
1514         mutex_lock(&nvmet_rdma_queue_mutex);
1515         while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
1516                         struct nvmet_rdma_queue, queue_list))) {
1517                 list_del_init(&queue->queue_list);
1518
1519                 mutex_unlock(&nvmet_rdma_queue_mutex);
1520                 __nvmet_rdma_queue_disconnect(queue);
1521                 mutex_lock(&nvmet_rdma_queue_mutex);
1522         }
1523         mutex_unlock(&nvmet_rdma_queue_mutex);
1524
1525         flush_scheduled_work();
1526         ida_destroy(&nvmet_rdma_queue_ida);
1527 }
1528
1529 module_init(nvmet_rdma_init);
1530 module_exit(nvmet_rdma_exit);
1531
1532 MODULE_LICENSE("GPL v2");
1533 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */