/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
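/*
 * Collapse the ethtool firmware version string into the u64 fw_ver
 * device attribute.  Note that this packs only the first character of
 * the string; userspace presumably just needs a non-zero marker here.
 */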
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
        *fw_ver = (u64) *fw_ver_str;
}
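/*
 * Fill the create_qp udata response with everything userspace needs to
 * drive the queues directly: the VF index, bar0 bus address and length,
 * the vNIC indices of the allocated RQs/WQs/CQs, and the transport type
 * of the qp_grp's default flow.
 */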
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
                                        struct ib_udata *udata)
{
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_create_qp_resp resp;
        struct pci_dev *pdev;
        struct vnic_dev_bar *bar;
        struct usnic_vnic_res_chunk *chunk;
        struct usnic_ib_qp_grp_flow *default_flow;
        int i, err;

        memset(&resp, 0, sizeof(resp));

        us_ibdev = qp_grp->vf->pf;
        pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
        if (!pdev) {
                usnic_err("Failed to get pdev of qp_grp %d\n",
                                qp_grp->grp_id);
                return -EFAULT;
        }

        bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
        if (!bar) {
                usnic_err("Failed to get bar0 of qp_grp %d vf %s\n",
                                qp_grp->grp_id, pci_name(pdev));
                return -EFAULT;
        }

        resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
        resp.bar_bus_addr = bar->bus_addr;
        resp.bar_len = bar->len;

        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
        if (IS_ERR_OR_NULL(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return chunk ? PTR_ERR(chunk) : -ENOMEM;
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
        resp.rq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.rq_idx[i] = chunk->res[i]->vnic_idx;

        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
        if (IS_ERR_OR_NULL(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return chunk ? PTR_ERR(chunk) : -ENOMEM;
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
        resp.wq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.wq_idx[i] = chunk->res[i]->vnic_idx;

        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
        if (IS_ERR_OR_NULL(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return chunk ? PTR_ERR(chunk) : -ENOMEM;
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
        resp.cq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.cq_idx[i] = chunk->res[i]->vnic_idx;

        default_flow = list_first_entry(&qp_grp->flows_lst,
                                        struct usnic_ib_qp_grp_flow, link);
        resp.transport = default_flow->trans_type;

        err = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (err) {
                usnic_err("Failed to copy udata for %s\n",
                                us_ibdev->ib_dev.name);
                return err;
        }

        return 0;
}
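/*
 * Pick a VF to back a new qp_grp.  If usnic_ib_share_vf is set, first
 * try to fit the resource spec onto a VF already in use by this PD;
 * otherwise fall back to a completely unused VF.  Called with
 * usdev_lock held; the chosen VF's spinlock is held across
 * usnic_ib_qp_grp_create() and released before returning.
 */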
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
                                struct usnic_ib_pd *pd,
                                struct usnic_transport_spec *trans_spec,
                                struct usnic_vnic_res_spec *res_spec)
{
        struct usnic_ib_vf *vf;
        struct usnic_vnic *vnic;
        struct usnic_ib_qp_grp *qp_grp;
        struct device *dev, **dev_list;
        int i, found = 0;

        BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

        if (list_empty(&us_ibdev->vf_dev_list)) {
                usnic_info("No vfs to allocate\n");
                return NULL;
        }

        if (usnic_ib_share_vf) {
                /* Try to find resources on a used VF which is in this PD */
                dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
                for (i = 0; dev_list[i]; i++) {
                        dev = dev_list[i];
                        vf = pci_get_drvdata(to_pci_dev(dev));
                        spin_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (!usnic_vnic_check_room(vnic, res_spec)) {
                                usnic_dbg("Found used vnic %s from %s\n",
                                                us_ibdev->ib_dev.name,
                                                pci_name(usnic_vnic_get_pdev(
                                                                        vnic)));
                                found = 1;
                                break;
                        }
                        spin_unlock(&vf->lock);
                }
                usnic_uiom_free_dev_list(dev_list);
        }

        if (!found) {
                /* Try to find resources on an unused VF */
                list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
                        spin_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (vf->qp_grp_ref_cnt == 0 &&
                                usnic_vnic_check_room(vnic, res_spec) == 0) {
                                found = 1;
                                break;
                        }
                        spin_unlock(&vf->lock);
                }
        }

        if (!found) {
                usnic_info("No free qp grp found on %s\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-ENOMEM);
        }

        qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
                                                trans_spec);
        spin_unlock(&vf->lock);
        if (IS_ERR_OR_NULL(qp_grp)) {
                usnic_err("Failed to allocate qp_grp\n");
                return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
        }

        return qp_grp;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
        struct usnic_ib_vf *vf = qp_grp->vf;

        WARN_ON(qp_grp->state != IB_QPS_RESET);

        spin_lock(&vf->lock);
        usnic_ib_qp_grp_destroy(qp_grp);
        spin_unlock(&vf->lock);
}
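/*
 * Map the ethtool link speed (in Mb/s) to the closest IB width/speed
 * pair that can carry it: e.g. 10G maps to 1X FDR10 (~10 Gb/s) and 40G
 * to 4X FDR10 (~40 Gb/s).  Anything faster is reported as 4X EDR.
 */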
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
                                        u8 *active_width)
{
        if (speed <= 10000) {
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_FDR10;
        } else if (speed <= 20000) {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_DDR;
        } else if (speed <= 30000) {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
        } else if (speed <= 40000) {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR10;
        } else {
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
        }
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
        if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
                        cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
                return -EINVAL;

        return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
                                                u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}
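/*
 * Device attributes are derived from the underlying netdev (firmware
 * version, link settings) and the per-VF resource counts: max_qp is the
 * larger of the per-VF WQ/RQ counts scaled by the number of VFs.
 * Limits that userspace enforces itself (max_qp_wr, max_sge, max_cqe)
 * are left at zero.
 */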
int usnic_ib_query_device(struct ib_device *ibdev,
                          struct ib_device_attr *props,
                          struct ib_udata *uhw)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
        union ib_gid gid;
        struct ethtool_drvinfo info;
        struct ethtool_cmd cmd;
        int qp_per_vf;

        usnic_dbg("\n");
        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        mutex_lock(&us_ibdev->usdev_lock);
        us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
        us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
        memset(props, 0, sizeof(*props));
        usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
                        &gid.raw[0]);
        memcpy(&props->sys_image_guid, &gid.global.interface_id,
                sizeof(gid.global.interface_id));
        usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
        props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
        props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
        props->vendor_id = PCI_VENDOR_ID_CISCO;
        props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
        props->hw_ver = us_ibdev->pdev->subsystem_device;
        qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
                        us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
        props->max_qp = qp_per_vf *
                atomic_read(&us_ibdev->vf_cnt.refcount);
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
                atomic_read(&us_ibdev->vf_cnt.refcount);
        props->max_pd = USNIC_UIOM_MAX_PD_CNT;
        props->max_mr = USNIC_UIOM_MAX_MR_CNT;
        props->local_ca_ack_delay = 0;
        props->max_pkeys = 0;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->masked_atomic_cap = props->atomic_cap;
        props->max_qp_rd_atom = 0;
        props->max_qp_init_rd_atom = 0;
        props->max_res_rd_atom = 0;
        props->max_srq = 0;
        props->max_srq_wr = 0;
        props->max_srq_sge = 0;
        props->max_fast_reg_page_list_len = 0;
        props->max_mcast_grp = 0;
        props->max_mcast_qp_attach = 0;
        props->max_total_mcast_qp_attach = 0;
        props->max_map_per_fmr = 0;
        /* Owned by userspace:
         * max_qp_wr, max_sge, max_sge_rd, max_cqe */
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}
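/*
 * Port state is synthesized from the underlying link: no carrier maps
 * to IB_PORT_DOWN, link up without an IP address to IB_PORT_INIT, and
 * link up with an IP to IB_PORT_ACTIVE.  The raw phys_state values
 * 3/4/5 correspond to the IB Disabled/PortConfigurationTraining/LinkUp
 * encodings.
 */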
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
                                struct ib_port_attr *props)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
        struct ethtool_cmd cmd;

        usnic_dbg("\n");

        mutex_lock(&us_ibdev->usdev_lock);
        us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
        memset(props, 0, sizeof(*props));

        props->lid = 0;
        props->lmc = 1;
        props->sm_lid = 0;
        props->sm_sl = 0;

        if (!us_ibdev->ufdev->link_up) {
                props->state = IB_PORT_DOWN;
                props->phys_state = 3;
        } else if (!us_ibdev->ufdev->inaddr) {
                props->state = IB_PORT_INIT;
                props->phys_state = 4;
        } else {
                props->state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        props->port_cap_flags = 0;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
                                &props->active_width);
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
        /* Userspace will adjust for hdrs */
        props->max_msg_sz = us_ibdev->ufdev->mtu;
        props->max_vl_num = 1;
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                                int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr)
{
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;
        int err;

        usnic_dbg("\n");

        memset(qp_attr, 0, sizeof(*qp_attr));
        memset(qp_init_attr, 0, sizeof(*qp_init_attr));

        qp_grp = to_uqp_grp(qp);
        vf = qp_grp->vf;
        mutex_lock(&vf->pf->usdev_lock);
        usnic_dbg("\n");
        qp_attr->qp_state = qp_grp->state;
        qp_attr->cur_qp_state = qp_grp->state;

        switch (qp_grp->ibqp.qp_type) {
        case IB_QPT_UD:
                qp_attr->qkey = 0;
                break;
        default:
                usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
                err = -EINVAL;
                goto err_out;
        }

        mutex_unlock(&vf->pf->usdev_lock);
        return 0;

err_out:
        mutex_unlock(&vf->pf->usdev_lock);
        return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                                union ib_gid *gid)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

        usnic_dbg("\n");

        if (index > 1)
                return -EINVAL;

        mutex_lock(&us_ibdev->usdev_lock);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
                        &gid->raw[0]);
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                                u16 *pkey)
{
        if (index > 1)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}
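/*
 * A usNIC PD is a thin wrapper around the backing usnic_uiom PD, which
 * appears to manage the pinned userspace memory and the set of devices
 * it is mapped on.
 */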
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
                                        struct ib_ucontext *context,
                                        struct ib_udata *udata)
{
        struct usnic_ib_pd *pd;
        void *umem_pd;

        usnic_dbg("\n");

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
        if (IS_ERR_OR_NULL(umem_pd)) {
                kfree(pd);
                return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
        }

        usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
                        pd, context, ibdev->name);
        return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
        usnic_info("freeing domain 0x%p\n", pd);

        usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
        kfree(pd);
        return 0;
}
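/*
 * create_qp for usNIC: only UD QPs are supported, and the real queue
 * state lives in userspace.  The kernel's job is to validate the
 * transport spec passed in through udata, reserve vNIC resources on a
 * suitable VF (one CQ if send_cq == recv_cq, two otherwise), and hand
 * the resulting indices back through the udata response.
 */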
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
                                        struct ib_qp_init_attr *init_attr,
                                        struct ib_udata *udata)
{
        int err;
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_ucontext *ucontext;
        int cq_cnt;
        struct usnic_vnic_res_spec res_spec;
        struct usnic_ib_create_qp_cmd cmd;
        struct usnic_transport_spec trans_spec;

        usnic_dbg("\n");

        ucontext = to_uucontext(pd->uobject->context);
        us_ibdev = to_usdev(pd->device);

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
        if (err) {
                usnic_err("%s: cannot copy udata for create_qp\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-EINVAL);
        }

        err = create_qp_validate_user_data(cmd);
        if (err) {
                usnic_err("%s: Failed to validate user data\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-EINVAL);
        }

        if (init_attr->qp_type != IB_QPT_UD) {
                usnic_err("%s asked to make a non-UD QP: %d\n",
                                us_ibdev->ib_dev.name, init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }

        trans_spec = cmd.spec;
        mutex_lock(&us_ibdev->usdev_lock);
        cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
        res_spec = min_transport_spec[trans_spec.trans_type];
        usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
        qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
                                                &trans_spec,
                                                &res_spec);
        if (IS_ERR_OR_NULL(qp_grp)) {
                err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
                goto out_release_mutex;
        }

        err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
        if (err) {
                err = -EBUSY;
                goto out_release_qp_grp;
        }

        qp_grp->ctx = ucontext;
        list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
        usnic_ib_log_vf(qp_grp->vf);
        mutex_unlock(&us_ibdev->usdev_lock);
        return &qp_grp->ibqp;

out_release_qp_grp:
        qp_grp_destroy(qp_grp);
out_release_mutex:
        mutex_unlock(&us_ibdev->usdev_lock);
        return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;

        usnic_dbg("\n");

        qp_grp = to_uqp_grp(qp);
        vf = qp_grp->vf;
        mutex_lock(&vf->pf->usdev_lock);
        if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
                usnic_err("Failed to move qp grp %u to reset\n",
                                qp_grp->grp_id);
        }

        list_del(&qp_grp->link);
        qp_grp_destroy(qp_grp);
        mutex_unlock(&vf->pf->usdev_lock);

        return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                int attr_mask, struct ib_udata *udata)
{
        struct usnic_ib_qp_grp *qp_grp;
        int status;

        usnic_dbg("\n");

        qp_grp = to_uqp_grp(ibqp);

        /* TODO: support all QP states */
        mutex_lock(&qp_grp->vf->pf->usdev_lock);
        if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
                status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
        } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
                status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
        } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
                status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
        } else {
                usnic_err("Unexpected combination mask: %u state: %u\n",
                                attr_mask & IB_QP_STATE, attr->qp_state);
                status = -EINVAL;
        }

        mutex_unlock(&qp_grp->vf->pf->usdev_lock);
        return status;
}
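/*
 * CQs are managed entirely by userspace; the kernel only needs a
 * placeholder object to satisfy the verbs layer.
 */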
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
                                 const struct ib_cq_init_attr *attr,
                                 struct ib_ucontext *context,
                                 struct ib_udata *udata)
{
        struct ib_cq *cq;

        usnic_dbg("\n");
        if (attr->flags)
                return ERR_PTR(-EINVAL);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
        usnic_dbg("\n");
        kfree(cq);
        return 0;
}
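/*
 * Memory registration pins the user pages through usnic_uiom and maps
 * them for the device.  usNIC does not use lkeys/rkeys, so both are
 * reported as 0.
 */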
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
                                        u64 virt_addr, int access_flags,
                                        struct ib_udata *udata)
{
        struct usnic_ib_mr *mr;
        int err;

        usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
                        virt_addr, length);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
                                        access_flags, 0);
        if (IS_ERR_OR_NULL(mr->umem)) {
                err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
                goto err_free;
        }

        mr->ibmr.lkey = mr->ibmr.rkey = 0;
        return &mr->ibmr;

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct usnic_ib_mr *mr = to_umr(ibmr);

        usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

        usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
        kfree(mr);
        return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
                                                        struct ib_udata *udata)
{
        struct usnic_ib_ucontext *context;
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

        usnic_dbg("\n");

        context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&context->qp_grp_list);
        mutex_lock(&us_ibdev->usdev_lock);
        list_add_tail(&context->link, &us_ibdev->ctx_list);
        mutex_unlock(&us_ibdev->usdev_lock);

        return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
        struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

        usnic_dbg("\n");

        mutex_lock(&us_ibdev->usdev_lock);
        BUG_ON(!list_empty(&context->qp_grp_list));
        list_del(&context->link);
        mutex_unlock(&us_ibdev->usdev_lock);
        kfree(context);
        return 0;
}
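/*
 * mmap of the uverbs device file: userspace encodes the VF index in the
 * page offset.  Look the VF up among this context's qp_grps and map its
 * bar0 (and nothing else) into the caller, requiring the requested
 * length to match the bar length exactly.
 */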
int usnic_ib_mmap(struct ib_ucontext *context,
                                struct vm_area_struct *vma)
{
        struct usnic_ib_ucontext *uctx = to_ucontext(context);
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;
        struct vnic_dev_bar *bar;
        dma_addr_t bus_addr;
        unsigned int len;
        unsigned int vfid;

        usnic_dbg("\n");

        us_ibdev = to_usdev(context->device);
        vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vfid = vma->vm_pgoff;
        usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
                        vma->vm_pgoff, PAGE_SHIFT, vfid);

        mutex_lock(&us_ibdev->usdev_lock);
        list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
                vf = qp_grp->vf;
                if (usnic_vnic_get_index(vf->vnic) == vfid) {
                        bar = usnic_vnic_get_bar(vf->vnic, 0);
                        if ((vma->vm_end - vma->vm_start) != bar->len) {
                                usnic_err("Bar0 Len %lu - Request map %lu\n",
                                                bar->len,
                                                vma->vm_end - vma->vm_start);
                                mutex_unlock(&us_ibdev->usdev_lock);
                                return -EINVAL;
                        }
                        bus_addr = bar->bus_addr;
                        len = bar->len;
                        usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
                                        &bus_addr, bar->vaddr, bar->len);
                        mutex_unlock(&us_ibdev->usdev_lock);

                        return remap_pfn_range(vma,
                                                vma->vm_start,
                                                bus_addr >> PAGE_SHIFT,
                                                len, vma->vm_page_prot);
                }
        }

        mutex_unlock(&us_ibdev->usdev_lock);
        usnic_err("No VF %u found\n", vfid);
        return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
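/*
 * The usNIC data path lives entirely in userspace, so the post/poll/
 * notify entry points below should never be reached through the kernel;
 * they exist only because the verbs core requires the callbacks to be
 * populated.
 */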
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
                                        struct ib_ah_attr *ah_attr)
{
        usnic_dbg("\n");
        return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                struct ib_send_wr **bad_wr)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                                struct ib_recv_wr **bad_wr)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
                                struct ib_wc *wc)
{
        usnic_dbg("\n");
        return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
                                        enum ib_cq_notify_flags flags)
{
        usnic_dbg("\n");
        return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        usnic_dbg("\n");
        return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */