xprtrdma: Pass only the list of registered MRs to ro_unmap_sync
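
A minimal sketch, before the diff itself, of the ownership-transfer pattern this patch introduces in rpcrdma_reply_handler(): the registered MRs are moved from req->rl_registered onto an on-stack list with list_replace_init() while the transport lock is still held, so ->ro_unmap_sync() can later walk the MRs without dereferencing the rpcrdma_req again. Everything prefixed "demo_" below is an illustrative name, not part of the xprtrdma code.

/* Illustrative sketch (not xprtrdma code): moving the MRs to a local
 * list decouples their invalidation from the lifetime of the request.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_mw {
	struct list_head mw_list;
	u32 mw_handle;
};

struct demo_req {
	struct list_head rl_registered;	/* MRs registered for this RPC */
};

/* Stand-in for ->ro_unmap_sync(): walks only the detached list. */
static void demo_unmap_sync(struct list_head *mws)
{
	struct demo_mw *mw, *tmp;

	list_for_each_entry_safe(mw, tmp, mws, mw_list)
		list_del_init(&mw->mw_list);	/* invalidate + unmap here */
}

static void demo_reply_path(struct demo_req *req, spinlock_t *lock)
{
	LIST_HEAD(mws);

	spin_lock_bh(lock);
	/* Detach the MRs while the lock is held; req->rl_registered is
	 * left empty, so the request can be re-used independently.
	 */
	list_replace_init(&req->rl_registered, &mws);
	spin_unlock_bh(lock);

	demo_unmap_sync(&mws);
}
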
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 694e9b13ecf07722848d86345589ad4793474fb5..c88132d02fb81b2b5a834c292632847850c666b8 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -928,6 +928,24 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
        return fixup_copy_count;
 }
 
+/* Caller must guarantee @rep remains stable during this call.
+ */
+static void
+rpcrdma_mark_remote_invalidation(struct list_head *mws,
+                                struct rpcrdma_rep *rep)
+{
+       struct rpcrdma_mw *mw;
+
+       if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
+               return;
+
+       list_for_each_entry(mw, mws, mw_list)
+               if (mw->mw_handle == rep->rr_inv_rkey) {
+                       mw->mw_flags = RPCRDMA_MW_F_RI;
+                       break; /* only one invalidated MR per RPC */
+               }
+}
+
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes
@@ -977,6 +995,7 @@ rpcrdma_reply_handler(struct work_struct *work)
        __be32 *iptr;
        int rdmalen, status, rmerr;
        unsigned long cwnd;
+       struct list_head mws;
 
        dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);
 
@@ -1006,13 +1025,14 @@ rpcrdma_reply_handler(struct work_struct *work)
        /* Sanity checking has passed. We are now committed
         * to complete this transaction.
         */
+       list_replace_init(&req->rl_registered, &mws);
+       rpcrdma_mark_remote_invalidation(&mws, rep);
        list_del_init(&rqst->rq_list);
+       req->rl_reply = rep;
        spin_unlock_bh(&xprt->transport_lock);
        dprintk("RPC:       %s: reply %p completes request %p (xid 0x%08x)\n",
                __func__, rep, req, be32_to_cpu(headerp->rm_xid));
 
-       /* from here on, the reply is no longer an orphan */
-       req->rl_reply = rep;
        xprt->reestablish_timeout = 0;
 
        if (headerp->rm_vers != rpcrdma_version)
@@ -1024,12 +1044,9 @@ rpcrdma_reply_handler(struct work_struct *work)
        case rdma_msg:
                /* never expect read chunks */
                /* never expect reply chunks (two ways to check) */
-               /* never expect write chunks without having offered RDMA */
                if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
                    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
-                    headerp->rm_body.rm_chunks[2] != xdr_zero) ||
-                   (headerp->rm_body.rm_chunks[1] != xdr_zero &&
-                    list_empty(&req->rl_registered)))
+                    headerp->rm_body.rm_chunks[2] != xdr_zero))
                        goto badheader;
                if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
                        /* count any expected write chunks in read reply */
@@ -1066,8 +1083,7 @@ rpcrdma_reply_handler(struct work_struct *work)
                /* never expect read or write chunks, always reply chunks */
                if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
                    headerp->rm_body.rm_chunks[1] != xdr_zero ||
-                   headerp->rm_body.rm_chunks[2] != xdr_one ||
-                   list_empty(&req->rl_registered))
+                   headerp->rm_body.rm_chunks[2] != xdr_one)
                        goto badheader;
                iptr = (__be32 *)((unsigned char *)headerp +
                                                        RPCRDMA_HDRLEN_MIN);
@@ -1100,8 +1116,8 @@ out:
         * control: waking the next RPC waits until this RPC has
         * relinquished all its Send Queue entries.
         */
-       if (!list_empty(&req->rl_registered))
-               r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
+       if (!list_empty(&mws))
+               r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &mws);
 
        spin_lock_bh(&xprt->transport_lock);
        cwnd = xprt->cwnd;
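
The list built above is what ->ro_unmap_sync() now receives instead of the whole rpcrdma_req. A hedged sketch of how an implementation might consume it, assuming MWs flagged RPCRDMA_MW_F_RI (set by rpcrdma_mark_remote_invalidation() in the first hunk) need no LOCAL_INV work request because the server already invalidated them; example_unmap_sync() is an illustrative name, and the real memory-registration back ends may differ.

static void
example_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct rpcrdma_mw *mw;

	list_for_each_entry(mw, mws, mw_list) {
		if (mw->mw_flags & RPCRDMA_MW_F_RI)
			continue;	/* already invalidated by the server */

		/* ... otherwise post a LOCAL_INV WR for mw->mw_handle ... */
	}
}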