/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request.
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
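/* Illustrative sketch only (not part of this file): the FAST_REG ->
 * LOCAL_INV lifecycle described in the header comment above, reduced
 * to its bare verbs calls. The qp, mr, sgl, nents and cqe parameters
 * are hypothetical placeholders for state the real transport keeps in
 * rpcrdma_mw and rpcrdma_frmr; the sketch is fenced off from the build
 * with #if 0.
 */
#if 0
static int frwr_lifecycle_sketch(struct ib_qp *qp, struct ib_mr *mr,
                                 struct scatterlist *sgl, int nents,
                                 struct ib_cqe *cqe)
{
        struct ib_send_wr *bad_wr;
        struct ib_reg_wr reg_wr;
        struct ib_send_wr inv_wr;
        int rc;

        /* 1. Register: map the scatterlist into the MR, then post an
         *    unsignaled FAST_REG (REG_MR) WR so no completion interrupt
         *    fires on success.
         */
        rc = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
        if (rc != nents)
                return -EIO;

        memset(&reg_wr, 0, sizeof(reg_wr));
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.wr_cqe = cqe;
        reg_wr.mr = mr;
        reg_wr.key = mr->rkey;
        reg_wr.access = IB_ACCESS_REMOTE_READ;
        rc = ib_post_send(qp, &reg_wr.wr, &bad_wr);
        if (rc)
                return rc;

        /* ... the peer performs RDMA READ or WRITE against mr->rkey ... */

        /* 2. Invalidate: once the RDMA operation is finished, post a
         *    LOCAL_INV WR for the same rkey to fence the memory from
         *    the peer again.
         */
        memset(&inv_wr, 0, sizeof(inv_wr));
        inv_wr.opcode = IB_WR_LOCAL_INV;
        inv_wr.wr_cqe = cqe;
        inv_wr.ex.invalidate_rkey = mr->rkey;
        return ib_post_send(qp, &inv_wr, &bad_wr);
}
#endif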
frwr_is_supported(struct rpcrdma_ia *ia)
        struct ib_device_attr *attrs = &ia->ri_device->attrs;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                goto out_not_supported;
        if (attrs->max_fast_reg_page_list_len == 0)
                goto out_not_supported;

        pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, unsigned int depth)
        struct rpcrdma_frmr *f = &r->frmr;

        f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
        if (IS_ERR(f->fr_mr))

        r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);

        sg_init_table(r->mw_sg, depth);
        init_completion(&f->fr_linv_done);

        rc = PTR_ERR(f->fr_mr);
        dprintk("RPC: %s: ib_alloc_mr status %i\n",

        dprintk("RPC: %s: sg allocation failure\n",

        ib_dereg_mr(f->fr_mr);
__frwr_release(struct rpcrdma_mw *r)
        rc = ib_dereg_mr(r->frmr.fr_mr);
        pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
        struct rpcrdma_frmr *f = &r->frmr;

        rc = ib_dereg_mr(f->fr_mr);
        pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",

        f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
                               ia->ri_max_frmr_depth);
        if (IS_ERR(f->fr_mr)) {
                pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
                        PTR_ERR(f->fr_mr), r);
                return PTR_ERR(f->fr_mr);
        }

        dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
        f->fr_state = FRMR_IS_INVALID;
/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
frwr_op_recover_mr(struct rpcrdma_mw *mw)
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        rc = __frwr_reset_mr(ia, mw);
        ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);

        pr_err("rpcrdma: FRMR reset status %d, %p orphaned\n",
        r_xprt->rx_stats.mrs_orphaned++;

        rpcrdma_put_mw(r_xprt, mw);
        r_xprt->rx_stats.mrs_recovered++;
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              ia->ri_device->attrs.max_fast_reg_page_list_len);
        dprintk("RPC: %s: device's max FR page list len = %u\n",
                __func__, ia->ri_max_frmr_depth);

        /* Add room for frmr register and invalidate WRs.
         * 1. FRMR reg WR for head
         * 2. FRMR invalidate WR for head
         * 3. N FRMR reg WRs for pagelist
         * 4. N FRMR invalidate WRs for pagelist
         * 5. FRMR reg WR for tail
         * 6. FRMR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */

        /* Calculate N if the device max FRMR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
                        depth += 2; /* FRMR reg + invalidate */
                        delta -= ia->ri_max_frmr_depth;

        ep->rep_attr.cap.max_send_wr *= depth;
        if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
                cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
                if (!cdata->max_requests)
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *

        rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
                                                      RPCRDMA_MAX_DATA_SEGS /
                                                      ia->ri_max_frmr_depth));
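        /* Worked example (hypothetical numbers, for illustration only):
         * starting from the seven WRs listed above (depth = 7), a device
         * whose max_fast_reg_page_list_len is 16 with RPCRDMA_MAX_DATA_SEGS
         * of 64 leaves delta = 48, so the reg + invalidate pair above is
         * added three times and depth becomes 7 + 3 * 2 = 13. If
         * max_requests * 13 then exceeds the device's max_qp_wr,
         * max_requests is scaled back until the send queue fits.
         */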
/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
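/* For illustration (hypothetical numbers): a device with an FRMR depth
 * of 128 and RPCRDMA_MAX_HDR_SEGS of 8 could address 8 * 128 = 1024
 * pages per transfer, so the result above is capped at
 * RPCRDMA_MAX_DATA_SEGS.
 */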
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
        frmr->fr_state = FRMR_IS_STALE;
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
                       wr, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 */
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
        struct rpcrdma_frmr *frmr;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                __frwr_sendcompletion_flush(wc, frmr, "fastreg");
/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 */
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
        struct rpcrdma_frmr *frmr;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
                __frwr_sendcompletion_flush(wc, frmr, "localinv");
/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
        struct rpcrdma_frmr *frmr;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
        if (wc->status != IB_WC_SUCCESS)
                __frwr_sendcompletion_flush(wc, frmr, "localinv");
        complete_all(&frmr->fr_linv_done);
frwr_op_init(struct rpcrdma_xprt *r_xprt)
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

        spin_lock_init(&buf->rb_mwlock);
        INIT_LIST_HEAD(&buf->rb_mws);
        INIT_LIST_HEAD(&buf->rb_all);

        i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
        i += 2;                         /* head + tail */
        i *= buf->rb_max_requests;      /* one set for each RPC slot */
        dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);
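        /* Example (hypothetical numbers): with RPCRDMA_MAX_DATA_SEGS of
         * 64, a depth of 16, and rb_max_requests of 32, this allocates
         * (64 / 16 + 2) * 32 = 192 FRMRs up front.
         */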
                struct rpcrdma_mw *r;

                r = kzalloc(sizeof(*r), GFP_KERNEL);
                rc = __frwr_init(r, pd, depth);
                list_add(&r->mw_list, &buf->rb_mws);
                list_add(&r->mw_all, &buf->rb_all);
/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing)
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_mw *mw;
        struct rpcrdma_frmr *frmr;
        struct ib_reg_wr *reg_wr;
        struct ib_send_wr *bad_wr;
        int rc, i, n, dma_nents;

                rpcrdma_defer_mr_recovery(mw);
                mw = rpcrdma_get_mw(r_xprt);
        } while (mw->frmr.fr_state != FRMR_IS_INVALID);
        frmr->fr_state = FRMR_IS_VALID;

        reg_wr = &frmr->fr_regwr;

        if (nsegs > ia->ri_max_frmr_depth)
                nsegs = ia->ri_max_frmr_depth;
        for (i = 0; i < nsegs;) {
                        sg_set_page(&mw->mw_sg[i],
                                    offset_in_page(seg->mr_offset));
                        sg_set_buf(&mw->mw_sg[i], seg->mr_offset,

                /* Check for holes */
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))

        mw->mw_dir = rpcrdma_data_dir(writing);
        dma_nents = ib_dma_map_sg(ia->ri_device,
                                  mw->mw_sg, mw->mw_nents, mw->mw_dir);

        n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
        if (unlikely(n != mw->mw_nents))

        dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
                __func__, mw, mw->mw_nents, mr->length);
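        /* Bumping the low-order byte of the rkey before re-registering
         * gives the region a fresh key, so a peer still holding the old
         * rkey can no longer touch this memory.
         */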
        key = (u8)(mr->rkey & 0x000000FF);
        ib_update_fast_reg_key(mr, ++key);

        reg_wr->wr.next = NULL;
        reg_wr->wr.opcode = IB_WR_REG_MR;
        frmr->fr_cqe.done = frwr_wc_fastreg;
        reg_wr->wr.wr_cqe = &frmr->fr_cqe;
        reg_wr->wr.num_sge = 0;
        reg_wr->wr.send_flags = 0;
        reg_wr->key = mr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        DECR_CQCOUNT(&r_xprt->rx_ep);
        rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);

        seg1->mr_rkey = mr->rkey;
        seg1->mr_base = mr->iova;
        seg1->mr_nsegs = mw->mw_nents;
        seg1->mr_len = mr->length;

        pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
               mw->mw_sg, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);

        pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
               frmr->fr_mr, n, mw->mw_nents);
        rpcrdma_defer_mr_recovery(mw);

        pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
        rpcrdma_defer_mr_recovery(mw);
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
        struct rpcrdma_mw *mw = seg->rl_mw;
        struct rpcrdma_frmr *f = &mw->frmr;
        struct ib_send_wr *invalidate_wr;

        f->fr_state = FRMR_IS_INVALID;
        invalidate_wr = &f->fr_invwr;

        memset(invalidate_wr, 0, sizeof(*invalidate_wr));
        f->fr_cqe.done = frwr_wc_localinv;
        invalidate_wr->wr_cqe = &f->fr_cqe;
        invalidate_wr->opcode = IB_WR_LOCAL_INV;
        invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

        return invalidate_wr;
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
        struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg;
        unsigned int i, nchunks;
        struct rpcrdma_frmr *f;
        struct rpcrdma_mw *mw;

        dprintk("RPC: %s: req %p\n", __func__, req);

        /* ORDER: Invalidate all of the req's MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        invalidate_wrs = pos = prev = NULL;
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];

                pos = __frwr_prepare_linv_wr(seg);

                        invalidate_wrs = pos;

        f = &seg->rl_mw->frmr;

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        f->fr_invwr.send_flags = IB_SEND_SIGNALED;
        f->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&f->fr_linv_done);
        INIT_CQCOUNT(&r_xprt->rx_ep);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
         */
        rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);

        wait_for_completion(&f->fr_linv_done);
        /* ORDER: Now DMA unmap all of the req's MRs, and return
         * them to the free MW list.
         */
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];

                ib_dma_unmap_sg(ia->ri_device,
                                mw->mw_sg, mw->mw_nents, mw->mw_dir);
                rpcrdma_put_mw(r_xprt, mw);

        pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
        rdma_disconnect(ia->ri_id);

        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted. This is synchronous, and slow.
         */
        for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
                seg = &req->rl_segments[i];

                if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
                        __frwr_reset_mr(ia, mw);
                        bad_wr = bad_wr->next;
/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
        struct rpcrdma_mr_seg *seg;
        struct rpcrdma_mw *mw;

        for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
                seg = &req->rl_segments[i];

                        frwr_op_recover_mr(mw);
                        rpcrdma_defer_mr_recovery(mw);
frwr_op_destroy(struct rpcrdma_buffer *buf)
        struct rpcrdma_mw *r;

        while (!list_empty(&buf->rb_all)) {
                r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                list_del(&r->mw_all);
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map = frwr_op_map,
        .ro_unmap_sync = frwr_op_unmap_sync,
        .ro_unmap_safe = frwr_op_unmap_safe,
        .ro_recover_mr = frwr_op_recover_mr,
        .ro_open = frwr_op_open,
        .ro_maxpages = frwr_op_maxpages,
        .ro_init = frwr_op_init,
        .ro_destroy = frwr_op_destroy,
        .ro_displayname = "frwr",
};
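
/* Illustrative sketch only (not from this file): how a transport setup
 * path might choose this ops vector. The rpcrdma_fmr_memreg_ops
 * fallback and the exact call site are assumptions; the real selection
 * happens when the transport's interface adapter is opened.
 */
#if 0
static void frwr_select_ops_sketch(struct rpcrdma_ia *ia)
{
        if (frwr_is_supported(ia))
                ia->ri_ops = &rpcrdma_frwr_memreg_ops;
        else
                ia->ri_ops = &rpcrdma_fmr_memreg_ops; /* hypothetical fallback */
        dprintk("RPC: %s: using '%s' memory registration\n",
                __func__, ia->ri_ops->ro_displayname);
}
#endif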