/* net/sunrpc/xprtrdma/physical_ops.c */
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring-up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */
15
16 #include "xprt_rdma.h"
17
18 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
19 # define RPCDBG_FACILITY        RPCDBG_TRANS
20 #endif
21
22 static int
23 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
24                  struct rpcrdma_create_data_internal *cdata)
25 {
26         struct ib_device_attr *devattr = &ia->ri_devattr;
27         struct ib_mr *mr;
28
29         /* Obtain an rkey to use for RPC data payloads.
30          */
31         mr = ib_get_dma_mr(ia->ri_pd,
32                            IB_ACCESS_LOCAL_WRITE |
33                            IB_ACCESS_REMOTE_WRITE |
34                            IB_ACCESS_REMOTE_READ);
35         if (IS_ERR(mr)) {
36                 pr_err("%s: ib_get_dma_mr for failed with %lX\n",
37                        __func__, PTR_ERR(mr));
38                 return -ENOMEM;
39         }
40         ia->ri_dma_mr = mr;
41
42         /* Obtain an lkey to use for regbufs.
43          */
44         if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
45                 ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
46         else
47                 ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
48
49         return 0;
50 }
51
52 /* PHYSICAL memory registration conveys one page per chunk segment.
53  */
54 static size_t
55 physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
56 {
57         return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
58                      rpcrdma_max_segments(r_xprt));
59 }
60
/* ALLPHYSICAL mode keeps no per-transport registration state,
 * so there is nothing to set up: always succeeds.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}
66
67 /* The client's physical memory is already exposed for
68  * remote access via RDMA READ or RDMA WRITE.
69  */
70 static int
71 physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
72                 int nsegs, bool writing)
73 {
74         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
75
76         rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
77         seg->mr_rkey = ia->ri_dma_mr->rkey;
78         seg->mr_base = seg->mr_dma;
79         seg->mr_nsegs = 1;
80         return 1;
81 }
82
83 /* Unmap a memory region, but leave it registered.
84  */
85 static int
86 physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
87 {
88         struct rpcrdma_ia *ia = &r_xprt->rx_ia;
89
90         rpcrdma_unmap_one(ia->ri_device, seg);
91         return 1;
92 }
93
/* No per-buffer registration state exists in ALLPHYSICAL mode, so
 * there is nothing to release here.
 * NOTE(review): ri_dma_mr is presumably torn down with the IA
 * elsewhere — confirm against the transport teardown path.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}
98
99 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
100         .ro_map                         = physical_op_map,
101         .ro_unmap                       = physical_op_unmap,
102         .ro_open                        = physical_op_open,
103         .ro_maxpages                    = physical_op_maxpages,
104         .ro_init                        = physical_op_init,
105         .ro_destroy                     = physical_op_destroy,
106         .ro_displayname                 = "physical",
107 };