/* drivers/net/ethernet/mellanox/mlx5/core/qp.c */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

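/* Look up the resource registered for @rsn in the per-device QP table and
 * take a reference on it.  The lookup and the refcount increment happen
 * under the table lock, so the entry cannot be freed underneath us.
 * Returns NULL (after a warning) if no resource is registered for @rsn.
 */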
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

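/* Drop a reference taken by mlx5_get_rsc().  The last put completes
 * &common->free, letting a destroyer blocked in wait_for_completion()
 * proceed.
 */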
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

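/* Dispatch an async hardware event to the resource it targets.  Only QP
 * resources are handled here; the reference taken by mlx5_get_rsc() is
 * dropped once the event callback returns.
 */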
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
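/* Decode an ODP page-fault EQE into a struct mlx5_pagefault and hand it to
 * the QP's pfault_handler.  RDMA-initiated and WQE-initiated faults carry
 * different payloads, hence the two decode branches below.
 */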
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
        int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
        struct mlx5_core_qp *qp;
        struct mlx5_pagefault pfault;

        if (!common) {
                mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
                               qpn);
                return;
        }
        qp = container_of(common, struct mlx5_core_qp, common);

        pfault.event_subtype = eqe->sub_type;
        pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
                (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
        pfault.bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

        mlx5_core_dbg(dev, "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x\n",
                      eqe->sub_type, pfault.flags);

        switch (eqe->sub_type) {
        case MLX5_PFAULT_SUBTYPE_RDMA:
                /* RDMA based event */
                pfault.rdma.r_key = be32_to_cpu(pf_eqe->rdma.r_key);
                pfault.rdma.packet_size =
                        be16_to_cpu(pf_eqe->rdma.packet_length);
                pfault.rdma.rdma_op_len =
                        be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                pfault.rdma.rdma_va = be64_to_cpu(pf_eqe->rdma.rdma_va);
                mlx5_core_dbg(dev, "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x\n",
                              qpn, pfault.rdma.r_key);
                mlx5_core_dbg(dev, "PAGE_FAULT: rdma_op_len: 0x%08x\n",
                              pfault.rdma.rdma_op_len);
                mlx5_core_dbg(dev, "PAGE_FAULT: rdma_va: 0x%016llx\n",
                              pfault.rdma.rdma_va);
                mlx5_core_dbg(dev, "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        case MLX5_PFAULT_SUBTYPE_WQE:
                /* WQE based event */
                pfault.wqe.wqe_index = be16_to_cpu(pf_eqe->wqe.wqe_index);
                pfault.wqe.packet_size =
                        be16_to_cpu(pf_eqe->wqe.packet_length);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x\n",
                              qpn, pfault.wqe.wqe_index);
                mlx5_core_dbg(dev, "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        default:
                mlx5_core_warn(dev,
                               "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
                               eqe->sub_type, qpn);
                /* Unsupported page faults should still be resolved by the
                 * page fault handler
                 */
        }

        if (qp->pfault_handler) {
                qp->pfault_handler(qp, &pfault);
        } else {
                mlx5_core_err(dev,
                              "ODP event for QP %08x, without a fault handler in QP\n",
                              qpn);
                /* Page fault will remain unresolved. QP will hang until it is
                 * destroyed
                 */
        }

        mlx5_core_put_rsc(common);
}
#endif

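/* Execute CREATE_QP and register the new QP in the driver's radix tree so
 * async events can find it.  If registration fails after the command has
 * succeeded, the QP is destroyed again via DESTROY_QP (see err_cmd below).
 */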
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;
        void *qpc;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        if (dev->issi) {
                qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
                /* 0xffffff means we ask to work with cqe version 0 */
                MLX5_SET(qpc, qpc, user_index, 0xffffff);
        }

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        qp->common.res = MLX5_RES_QP;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, qp->qpn, qp);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto err_cmd;
        }

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        qp->pid = current->pid;
        atomic_set(&qp->common.refcount, 1);
        atomic_inc(&dev->num_qps);
        init_completion(&qp->common.free);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

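/* Tear-down mirror of mlx5_core_create_qp(): unregister the QP from the
 * radix tree, drop the initial reference and wait until all concurrent
 * event handlers have dropped theirs, then issue DESTROY_QP.
 */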
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;
        int err;

        mlx5_debug_qp_remove(dev, qp);

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree, qp->qpn);
        spin_unlock_irqrestore(&table->lock, flags);

        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

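/* Drive a QP state transition.  optab[cur][new] maps a (current, new)
 * state pair to the firmware opcode implementing it; a zero entry means
 * the transition is invalid and -EINVAL is returned.  A typical bring-up
 * walks RST -> INIT -> RTR -> RTS, e.g. (sketch only, error handling and
 * mailbox setup omitted):
 *
 *      err = mlx5_core_qp_modify(dev, MLX5_QP_STATE_RST,
 *                                MLX5_QP_STATE_INIT, in, 0, qp);
 */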
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
                        enum mlx5_qp_state new_state,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
                [MLX5_QP_STATE_RST] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_RST2INIT_QP,
                },
                [MLX5_QP_STATE_INIT]  = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_INIT2INIT_QP,
                        [MLX5_QP_STATE_RTR]     = MLX5_CMD_OP_INIT2RTR_QP,
                },
                [MLX5_QP_STATE_RTR]   = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTR2RTS_QP,
                },
                [MLX5_QP_STATE_RTS]   = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTS2RTS_QP,
                },
                [MLX5_QP_STATE_SQD] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                },
                [MLX5_QP_STATE_SQER] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_SQERR2RTS_QP,
                },
                [MLX5_QP_STATE_ERR] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                }
        };

        struct mlx5_modify_qp_mbox_out out;
        int err = 0;
        u16 op;

        if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
            !optab[cur_state][new_state])
                return -EINVAL;

        memset(&out, 0, sizeof(out));
        op = optab[cur_state][new_state];
        in->hdr.opcode = cpu_to_be16(op);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

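/* Set up the per-device QP table: the lock-protected radix tree used by
 * mlx5_get_rsc() and the QP debugfs hierarchy.
 */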
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

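/* Execute QUERY_QP for @qp, filling @out (of @outlen bytes) with the
 * firmware's view of the QP context.
 */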
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

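/* Allocate an XRC domain from firmware and return its number through
 * @xrcdn.
 */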
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        struct mlx5_alloc_xrcd_mbox_in in;
        struct mlx5_alloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
                *xrcdn = be32_to_cpu(out.xrcdn);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

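/* Release an XRC domain previously obtained from mlx5_core_xrcd_alloc(). */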
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        struct mlx5_dealloc_xrcd_mbox_in in;
        struct mlx5_dealloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
        in.xrcdn = cpu_to_be32(xrcdn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
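/* Tell firmware to resume QP operation after an ODP page fault has been
 * handled.  @flags is masked down to the REQUESTOR/WRITE/RDMA bits that
 * describe the original fault; when @error is set, the ERROR bit is added,
 * which (going by the flag name) asks firmware to complete the faulting
 * operation with an error rather than retry it.
 */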
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 flags, int error)
{
        struct mlx5_page_fault_resume_mbox_in in;
        struct mlx5_page_fault_resume_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
        in.hdr.opmod = 0;
        flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
                  MLX5_PAGE_FAULT_RESUME_WRITE     |
                  MLX5_PAGE_FAULT_RESUME_RDMA);
        flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
        in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
                                   (flags << MLX5_QPN_BITS));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif