]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/infiniband/hw/usnic/usnic_ib_verbs.c
Merge branch 'for-linus' into for-next
[karo-tx-linux.git] / drivers / infiniband / hw / usnic / usnic_ib_verbs.c
1 /*
2  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3  *
4  * This program is free software; you may redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15  * SOFTWARE.
16  *
17  */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
25
26 #include "usnic_abi.h"
27 #include "usnic_ib.h"
28 #include "usnic_common_util.h"
29 #include "usnic_ib_qp_grp.h"
30 #include "usnic_fwd.h"
31 #include "usnic_log.h"
32 #include "usnic_uiom.h"
33 #include "usnic_transport.h"
34
35 #define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
36
37 static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
38 {
39         *fw_ver = (u64) *fw_ver_str;
40 }
41
/*
 * Fill in the uverbs create-QP response with everything userspace needs to
 * drive the QP directly: the VF index, BAR0 location, and the vNIC indices
 * of the RQ/WQ/CQ resources owned by @qp_grp, plus the transport type of
 * the group's default flow.  Returns 0 on success or a negative errno.
 */
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	/* Userspace mmap()s BAR0 (keyed by vfid) to reach the queues. */
	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	/* Report the vNIC indices of each resource type in turn: RQ ... */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	/* ... then WQ ... */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	/* ... and finally CQ. */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	/*
	 * The first flow on the group is the default; assumes flows_lst is
	 * non-empty at this point (the group was just created with a
	 * transport spec) — list_first_entry does not tolerate an empty list.
	 */
	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
128
/*
 * Pick a VF with room for @res_spec and create a QP group on it.
 *
 * Must be called with us_ibdev->usdev_lock held.  If VF sharing is
 * enabled, VFs already bound to @pd are preferred; otherwise an entirely
 * unused VF is taken.  Note the locking protocol: when a suitable VF is
 * found, the loop breaks out *with vf->lock still held*, and the lock is
 * only dropped after usnic_ib_qp_grp_create() has claimed the resources.
 *
 * Returns the new group, NULL when no VFs exist at all, or an ERR_PTR.
 */
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			/* check_room returns 0 when the spec fits */
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				found = 1;
				break;	/* vf->lock intentionally kept held */
			}
			spin_unlock(&vf->lock);

		}
		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;	/* vf->lock intentionally kept held */
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	/* Still holding the chosen vf->lock from the loop above. */
	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
						trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}
200
201 static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
202 {
203         struct usnic_ib_vf *vf = qp_grp->vf;
204
205         WARN_ON(qp_grp->state != IB_QPS_RESET);
206
207         spin_lock(&vf->lock);
208         usnic_ib_qp_grp_destroy(qp_grp);
209         spin_unlock(&vf->lock);
210 }
211
212 static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
213                                         u8 *active_width)
214 {
215         if (speed <= 10000) {
216                 *active_width = IB_WIDTH_1X;
217                 *active_speed = IB_SPEED_FDR10;
218         } else if (speed <= 20000) {
219                 *active_width = IB_WIDTH_4X;
220                 *active_speed = IB_SPEED_DDR;
221         } else if (speed <= 30000) {
222                 *active_width = IB_WIDTH_4X;
223                 *active_speed = IB_SPEED_QDR;
224         } else if (speed <= 40000) {
225                 *active_width = IB_WIDTH_4X;
226                 *active_speed = IB_SPEED_FDR10;
227         } else {
228                 *active_width = IB_WIDTH_4X;
229                 *active_speed = IB_SPEED_EDR;
230         }
231 }
232
233 static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
234 {
235         if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
236                         cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
237                 return -EINVAL;
238
239         return 0;
240 }
241
242 /* Start of ib callback functions */
243
/* usNIC runs over Ethernet on every port; the port number is irrelevant. */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
249
/*
 * ib_device query callback: fill *props with the device's capabilities.
 * Rejects any extended (uhw) data, then derives attributes from the
 * underlying netdev (via ethtool) and the per-VF resource counts.
 */
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	struct ethtool_cmd cmd;
	int qp_per_vf;

	usnic_dbg("\n");
	/* No driver-specific query extensions are supported. */
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	/*
	 * NOTE(review): both ethtool ops are called unchecked — assumes the
	 * enic netdev always provides get_drvinfo/get_settings; confirm.
	 */
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));
	/* The system image GUID is derived from the MAC + IP based GID. */
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	/* A QP consumes one WQ and one RQ, so per-VF QPs = max of the two. */
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	/* usNIC supports no atomics, SRQs, multicast, or FMRs. */
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
309
/*
 * ib_device query_port callback: report link state, speed/width and MTU
 * for the (single) usNIC port, derived from the underlying netdev.
 */
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_cmd cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	/* NOTE(review): get_settings is called unchecked — assumes enic
	 * always provides it; confirm. */
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	/* Ethernet link layer: no LIDs or subnet manager. */
	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	/*
	 * Port state tracks the netdev: DOWN without carrier, INIT with
	 * carrier but no IP address, ACTIVE once an address is configured.
	 * phys_state values 3/4/5 = Disabled/PortConfigurationTraining/LinkUp.
	 */
	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
354
355 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
356                                 int qp_attr_mask,
357                                 struct ib_qp_init_attr *qp_init_attr)
358 {
359         struct usnic_ib_qp_grp *qp_grp;
360         struct usnic_ib_vf *vf;
361         int err;
362
363         usnic_dbg("\n");
364
365         memset(qp_attr, 0, sizeof(*qp_attr));
366         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
367
368         qp_grp = to_uqp_grp(qp);
369         vf = qp_grp->vf;
370         mutex_lock(&vf->pf->usdev_lock);
371         usnic_dbg("\n");
372         qp_attr->qp_state = qp_grp->state;
373         qp_attr->cur_qp_state = qp_grp->state;
374
375         switch (qp_grp->ibqp.qp_type) {
376         case IB_QPT_UD:
377                 qp_attr->qkey = 0;
378                 break;
379         default:
380                 usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
381                 err = -EINVAL;
382                 goto err_out;
383         }
384
385         mutex_unlock(&vf->pf->usdev_lock);
386         return 0;
387
388 err_out:
389         mutex_unlock(&vf->pf->usdev_lock);
390         return err;
391 }
392
393 int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
394                                 union ib_gid *gid)
395 {
396
397         struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
398         usnic_dbg("\n");
399
400         if (index > 1)
401                 return -EINVAL;
402
403         mutex_lock(&us_ibdev->usdev_lock);
404         memset(&(gid->raw[0]), 0, sizeof(gid->raw));
405         usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
406                         &gid->raw[0]);
407         mutex_unlock(&us_ibdev->usdev_lock);
408
409         return 0;
410 }
411
412 int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
413                                 u16 *pkey)
414 {
415         if (index > 1)
416                 return -EINVAL;
417
418         *pkey = 0xffff;
419         return 0;
420 }
421
422 struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
423                                         struct ib_ucontext *context,
424                                         struct ib_udata *udata)
425 {
426         struct usnic_ib_pd *pd;
427         void *umem_pd;
428
429         usnic_dbg("\n");
430
431         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
432         if (!pd)
433                 return ERR_PTR(-ENOMEM);
434
435         umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
436         if (IS_ERR_OR_NULL(umem_pd)) {
437                 kfree(pd);
438                 return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
439         }
440
441         usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
442                         pd, context, ibdev->name);
443         return &pd->ibpd;
444 }
445
446 int usnic_ib_dealloc_pd(struct ib_pd *pd)
447 {
448         usnic_info("freeing domain 0x%p\n", pd);
449
450         usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
451         kfree(pd);
452         return 0;
453 }
454
/*
 * ib_device create_qp callback.  Validates the user's command (transport
 * type, UD-only QP type), finds a VF with room for the minimal resource
 * spec of that transport, creates a QP group on it, and returns the
 * response (VF/BAR/queue indices) to userspace.  On failure everything
 * acquired so far is unwound via the goto labels.
 */
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	/* No QP creation flags are supported. */
	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	/* usNIC only implements unreliable-datagram QPs. */
	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	/* One CQ if send and recv share it, otherwise two. */
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	/* Start from the transport's minimal spec, then size the CQs. */
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		/* NULL means "no VFs at all"; map it to -ENOMEM. */
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	/* Track the group on its ucontext so mmap/teardown can find it. */
	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}
527
/*
 * ib_device destroy_qp callback.  Forces the QP group back to RESET
 * (logging, but not failing, if the transition fails), unlinks it from
 * its ucontext and destroys it — all under the owning PF's device lock.
 * Always returns 0.
 */
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		/* Best effort: proceed with teardown even if this fails. */
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}
549
550 int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
551                                 int attr_mask, struct ib_udata *udata)
552 {
553         struct usnic_ib_qp_grp *qp_grp;
554         int status;
555         usnic_dbg("\n");
556
557         qp_grp = to_uqp_grp(ibqp);
558
559         /* TODO: Future Support All States */
560         mutex_lock(&qp_grp->vf->pf->usdev_lock);
561         if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
562                 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
563         } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
564                 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
565         } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
566                 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
567         } else {
568                 usnic_err("Unexpected combination mask: %u state: %u\n",
569                                 attr_mask & IB_QP_STATE, attr->qp_state);
570                 status = -EINVAL;
571         }
572
573         mutex_unlock(&qp_grp->vf->pf->usdev_lock);
574         return status;
575 }
576
577 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
578                                  const struct ib_cq_init_attr *attr,
579                                  struct ib_ucontext *context,
580                                  struct ib_udata *udata)
581 {
582         struct ib_cq *cq;
583
584         usnic_dbg("\n");
585         if (attr->flags)
586                 return ERR_PTR(-EINVAL);
587
588         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
589         if (!cq)
590                 return ERR_PTR(-EBUSY);
591
592         return cq;
593 }
594
/* ib_device destroy_cq callback: the CQ is only a kernel-side shell
 * (see usnic_ib_create_cq), so freeing it is all there is to do. */
int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}
601
602 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
603                                         u64 virt_addr, int access_flags,
604                                         struct ib_udata *udata)
605 {
606         struct usnic_ib_mr *mr;
607         int err;
608
609         usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
610                         virt_addr, length);
611
612         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
613         if (IS_ERR_OR_NULL(mr))
614                 return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);
615
616         mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
617                                         access_flags, 0);
618         if (IS_ERR_OR_NULL(mr->umem)) {
619                 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
620                 goto err_free;
621         }
622
623         mr->ibmr.lkey = mr->ibmr.rkey = 0;
624         return &mr->ibmr;
625
626 err_free:
627         kfree(mr);
628         return ERR_PTR(err);
629 }
630
631 int usnic_ib_dereg_mr(struct ib_mr *ibmr)
632 {
633         struct usnic_ib_mr *mr = to_umr(ibmr);
634
635         usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
636
637         usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
638         kfree(mr);
639         return 0;
640 }
641
642 struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
643                                                         struct ib_udata *udata)
644 {
645         struct usnic_ib_ucontext *context;
646         struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
647         usnic_dbg("\n");
648
649         context = kmalloc(sizeof(*context), GFP_KERNEL);
650         if (!context)
651                 return ERR_PTR(-ENOMEM);
652
653         INIT_LIST_HEAD(&context->qp_grp_list);
654         mutex_lock(&us_ibdev->usdev_lock);
655         list_add_tail(&context->link, &us_ibdev->ctx_list);
656         mutex_unlock(&us_ibdev->usdev_lock);
657
658         return &context->ibucontext;
659 }
660
661 int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
662 {
663         struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
664         struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
665         usnic_dbg("\n");
666
667         mutex_lock(&us_ibdev->usdev_lock);
668         BUG_ON(!list_empty(&context->qp_grp_list));
669         list_del(&context->link);
670         mutex_unlock(&us_ibdev->usdev_lock);
671         kfree(context);
672         return 0;
673 }
674
/*
 * ib_device mmap callback.  Userspace mmap()s a VF's BAR0 to drive its
 * queues directly; the VF index is encoded in vm_pgoff (as returned in
 * the create-QP response).  The mapping must cover BAR0 exactly and is
 * only permitted for VFs that back a QP group on this ucontext.
 */
int usnic_ib_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	/* Device registers: uncached I/O mapping. */
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* vm_pgoff carries the VF index, not a byte offset. */
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	/* Only VFs owning a QP group on this context may be mapped. */
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			/* Drop the lock before the (potentially slow) remap;
			 * the BAR itself outlives the mapping. */
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}
725
726 /* In ib callbacks section -  Start of stub funcs */
/* Stub: usNIC does not implement address handles; creation always fails. */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}
733
/* Stub: no address handles exist, so there is never anything to destroy. */
int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}
739
/* Stub: data path is entirely in userspace; kernel posts are rejected. */
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
746
/* Stub: data path is entirely in userspace; kernel posts are rejected. */
int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
753
/* Stub: completions are consumed in userspace; kernel polling is rejected. */
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}
760
/* Stub: CQ event notification is not supported for userspace-driven CQs. */
int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}
767
/* Stub: usNIC does no kernel-side DMA, so no DMA MR can be provided. */
struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}
773
774
775 /* In ib callbacks section - End of stub funcs */
776 /* End of ib callbacks section */