/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
                             struct bnxt_qplib_sge *sg_list, int num)
{
        int i, total = 0;

        for (i = 0; i < num; i++) {
                sg_list[i].addr = ib_sg_list[i].addr;
                sg_list[i].lkey = ib_sg_list[i].lkey;
                sg_list[i].size = ib_sg_list[i].length;
                total += sg_list[i].size;
        }
        return total;
}

/* Device */
struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct net_device *netdev = NULL;

        rcu_read_lock();
        if (rdev)
                netdev = rdev->netdev;
        if (netdev)
                dev_hold(netdev);

        rcu_read_unlock();
        return netdev;
}

int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(ib_attr, 0, sizeof(*ib_attr));

        ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
        bnxt_qplib_get_guid(rdev->netdev->dev_addr,
                            (u8 *)&ib_attr->sys_image_guid);
        ib_attr->max_mr_size = ~0ull;
        ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
                                 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
                                 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;

        ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
        ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
        ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
        ib_attr->max_qp = dev_attr->max_qp;
        ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
        ib_attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                    IB_DEVICE_RC_RNR_NAK_GEN |
                                    IB_DEVICE_SHUTDOWN_PORT |
                                    IB_DEVICE_SYS_IMAGE_GUID |
                                    IB_DEVICE_LOCAL_DMA_LKEY |
                                    IB_DEVICE_RESIZE_MAX_WR |
                                    IB_DEVICE_PORT_ACTIVE_EVENT |
                                    IB_DEVICE_N_NOTIFY_CQ |
                                    IB_DEVICE_MEM_WINDOW |
                                    IB_DEVICE_MEM_WINDOW_TYPE_2B |
                                    IB_DEVICE_MEM_MGT_EXTENSIONS;
        ib_attr->max_sge = dev_attr->max_qp_sges;
        ib_attr->max_sge_rd = dev_attr->max_qp_sges;
        ib_attr->max_cq = dev_attr->max_cq;
        ib_attr->max_cqe = dev_attr->max_cq_wqes;
        ib_attr->max_mr = dev_attr->max_mr;
        ib_attr->max_pd = dev_attr->max_pd;
        ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->atomic_cap = IB_ATOMIC_HCA;
        ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;

        ib_attr->max_ee_rd_atom = 0;
        ib_attr->max_res_rd_atom = 0;
        ib_attr->max_ee_init_rd_atom = 0;
        ib_attr->max_ee = 0;
        ib_attr->max_rdd = 0;
        ib_attr->max_mw = dev_attr->max_mw;
        ib_attr->max_raw_ipv6_qp = 0;
        ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
        ib_attr->max_mcast_grp = 0;
        ib_attr->max_mcast_qp_attach = 0;
        ib_attr->max_total_mcast_qp_attach = 0;
        ib_attr->max_ah = dev_attr->max_ah;

        ib_attr->max_fmr = dev_attr->max_fmr;
        ib_attr->max_map_per_fmr = 1;   /* ? */

        ib_attr->max_srq = dev_attr->max_srq;
        ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
        ib_attr->max_srq_sge = dev_attr->max_srq_sges;

        ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

        ib_attr->max_pkeys = 1;
        ib_attr->local_ca_ack_delay = 0;
        return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
                          int device_modify_mask,
                          struct ib_device_modify *device_modify)
{
        switch (device_modify_mask) {
        case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
                /* Modifying the GUID requires modifying the GID table,
                 * so the GUID should be made READ-ONLY.
                 */
                break;
        case IB_DEVICE_MODIFY_NODE_DESC:
                /* Node Desc should be made READ-ONLY */
                break;
        default:
                break;
        }
        return 0;
}

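/*
 * Map the ethtool link speed to an IB (speed, width) pair whose product
 * approximates the Ethernet rate: e.g. 20G -> DDR (5 Gbps) x 4X and
 * 40G -> QDR (10 Gbps) x 4X. SPEED_50000 leaves the caller's
 * zero-initialized speed/width values untouched.
 */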
static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
{
        struct ethtool_link_ksettings lksettings;
        u32 espeed;

        if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
                memset(&lksettings, 0, sizeof(lksettings));
                rtnl_lock();
                netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
                rtnl_unlock();
                espeed = lksettings.base.speed;
        } else {
                espeed = SPEED_UNKNOWN;
        }
        switch (espeed) {
        case SPEED_1000:
                *speed = IB_SPEED_SDR;
                *width = IB_WIDTH_1X;
                break;
        case SPEED_10000:
                *speed = IB_SPEED_QDR;
                *width = IB_WIDTH_1X;
                break;
        case SPEED_20000:
                *speed = IB_SPEED_DDR;
                *width = IB_WIDTH_4X;
                break;
        case SPEED_25000:
                *speed = IB_SPEED_EDR;
                *width = IB_WIDTH_1X;
                break;
        case SPEED_40000:
                *speed = IB_SPEED_QDR;
                *width = IB_WIDTH_4X;
                break;
        case SPEED_50000:
                break;
        default:
                *speed = IB_SPEED_SDR;
                *width = IB_WIDTH_1X;
                break;
        }
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
                       struct ib_port_attr *port_attr)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

        memset(port_attr, 0, sizeof(*port_attr));

        if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
                port_attr->state = IB_PORT_ACTIVE;
                port_attr->phys_state = 5;
        } else {
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = 3;
        }
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
        port_attr->gid_tbl_len = dev_attr->max_sgid;
        port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                    IB_PORT_DEVICE_MGMT_SUP |
                                    IB_PORT_VENDOR_CLASS_SUP |
                                    IB_PORT_IP_BASED_GIDS;

        /* Max MSG size set to 2G for now */
        port_attr->max_msg_sz = 0x80000000;
        port_attr->bad_pkey_cntr = 0;
        port_attr->qkey_viol_cntr = 0;
        port_attr->pkey_tbl_len = dev_attr->max_pkey;
        port_attr->lid = 0;
        port_attr->sm_lid = 0;
        port_attr->lmc = 0;
        port_attr->max_vl_num = 4;
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
        /* Query speed settings via the underlying netdev's ethtool hooks,
         * taking rtnl_lock only while the device is registered with the
         * IB stack, to avoid a race with the NETDEV_UNREG path.
         */
        if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
                __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
                                    &port_attr->active_width);
        return 0;
}

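/*
 * No port attribute is modified in HW here; every modify mask is
 * accepted and ignored.
 */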
int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
                        int port_modify_mask,
                        struct ib_port_modify *port_modify)
{
        switch (port_modify_mask) {
        case IB_PORT_SHUTDOWN:
                break;
        case IB_PORT_INIT_TYPE:
                break;
        case IB_PORT_RESET_QKEY_CNTR:
                break;
        default:
                break;
        }
        return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr port_attr;

        if (bnxt_re_query_port(ibdev, port_num, &port_attr))
                return -EINVAL;

        immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
        immutable->gid_tbl_len = port_attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
        immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
                       u16 index, u16 *pkey)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

        /* Ignore port_num */

        memset(pkey, 0, sizeof(*pkey));
        return bnxt_qplib_get_pkey(&rdev->qplib_res,
                                   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
                      int index, union ib_gid *gid)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        int rc = 0;

        /* Ignore port_num */
        memset(gid, 0, sizeof(*gid));
        rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
                                 &rdev->qplib_res.sgid_tbl, index,
                                 (struct bnxt_qplib_gid *)gid);
        return rc;
}

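/*
 * GID contexts are reference-counted: the stack installs one entry per
 * RoCE version for the same GID while HW stores it once (see the
 * sgid_index / 2 handling in bnxt_re_create_ah), so a bnxt_re_gid_ctx
 * counts the stack entries mapped onto each HW slot; the HW entry is
 * removed only when the last reference is dropped.
 */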
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
                    unsigned int index, void **context)
{
        int rc = 0;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        /* Delete the entry from the hardware */
        ctx = *context;
        if (!ctx)
                return -EINVAL;

        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
                ctx->refcnt--;
                if (!ctx->refcnt) {
                        rc = bnxt_qplib_del_sgid(sgid_tbl,
                                                 &sgid_tbl->tbl[ctx->idx],
                                                 true);
                        if (rc)
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
                        ctx_tbl = sgid_tbl->ctx;
                        ctx_tbl[ctx->idx] = NULL;
                        kfree(ctx);
                }
        } else {
                return -EINVAL;
        }
        return rc;
}

int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
                    unsigned int index, const union ib_gid *gid,
                    const struct ib_gid_attr *attr, void **context)
{
        int rc;
        u32 tbl_idx = 0;
        u16 vlan_id = 0xFFFF;
        struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

        if ((attr->ndev) && is_vlan_dev(attr->ndev))
                vlan_id = vlan_dev_vlan_id(attr->ndev);

        rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)gid,
                                 rdev->qplib_res.netdev->dev_addr,
                                 vlan_id, true, &tbl_idx);
        if (rc == -EALREADY) {
                ctx_tbl = sgid_tbl->ctx;
                ctx_tbl[tbl_idx]->refcnt++;
                *context = ctx_tbl[tbl_idx];
                return 0;
        }

        if (rc < 0) {
                dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
                return rc;
        }

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx_tbl = sgid_tbl->ctx;
        ctx->idx = tbl_idx;
        ctx->refcnt = 1;
        ctx_tbl[tbl_idx] = ctx;
        /* Hand the new context back so bnxt_re_del_gid can find it */
        *context = ctx;

        return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
                                            u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        int rc;

        if (ib_pd->uobject && pd->dpi.dbr) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *ucntx;

                /* Free the DPI only if it was allocated along with this PD
                 * (the first PD created by the application) and clear the
                 * context's dpi pointer.
                 */
                ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);

                rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
                                            &rdev->qplib_res.dpi_tbl,
                                            &pd->dpi);
                if (rc)
                        dev_err(rdev_to_dev(rdev),
                                "Failed to deallocate HW DPI");
                /* Don't fail; continue */
                ucntx->dpi = NULL;
        }

        rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
                                   &rdev->qplib_res.pd_tbl,
                                   &pd->qplib_pd);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
                return rc;
        }

        kfree(pd);
        return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
                               struct ib_ucontext *ucontext,
                               struct ib_udata *udata)
{
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_re_ucontext *ucntx = container_of(ucontext,
                                                      struct bnxt_re_ucontext,
                                                      ib_uctx);
        struct bnxt_re_pd *pd;
        int rc;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        pd->rdev = rdev;
        if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
                rc = -ENOMEM;
                goto fail;
        }

        if (udata) {
                struct bnxt_re_pd_resp resp;

                if (!ucntx->dpi) {
                        /* Allocate the DPI here, in alloc_pd, so that
                         * ibv_devinfo and friends do not fail once DPIs
                         * are depleted.
                         */
                        if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
                                                 &pd->dpi, ucntx)) {
                                rc = -ENOMEM;
                                goto dbfail;
                        }
                        ucntx->dpi = &pd->dpi;
                }

                resp.pdid = pd->qplib_pd.id;
                /* Still allow mapping this DBR to the new user PD. */
                resp.dpi = ucntx->dpi->dpi;
                resp.dbr = (u64)ucntx->dpi->umdbr;

                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to copy user response\n");
                        goto dbfail;
                }
        }

        return &pd->ib_pd;
dbfail:
        (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
                                    &pd->qplib_pd);
fail:
        kfree(pd);
        return ERR_PTR(rc);
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        struct bnxt_re_dev *rdev = ah->rdev;
        int rc;

        rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
                return rc;
        }
        kfree(ah);
        return 0;
}

struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
                                struct ib_ah_attr *ah_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        struct ib_gid_attr sgid_attr;
        int rc;
        u16 vlan_tag;
        u8 nw_type;

        if (!(ah_attr->ah_flags & IB_AH_GRH)) {
                dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
                return ERR_PTR(-EINVAL);
        }
        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        /* Supply the configuration for the HW */
        memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw,
               sizeof(union ib_gid));
        /*
         * If RoCE v2 is enabled, the stack holds two entries for each GID,
         * one per RoCE version, while HW stores the GID once. Divide the
         * GID index by 2 to get the HW index for RoCE v2.
         */
        ah->qplib_ah.sgid_index = ah_attr->grh.sgid_index / 2;
        ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index;
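        /*
         * For example, with GID0 installed for both RoCE v1 and v2, the
         * stack table reads {0: GID0/v1, 1: GID0/v2, 2: GID1/v1,
         * 3: GID1/v2}, and stack index 3 maps to HW index 3 / 2 = 1.
         */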
        ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class;
        ah->qplib_ah.flow_label = ah_attr->grh.flow_label;
        ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit;
        ah->qplib_ah.sl = ah_attr->sl;
        if (ib_pd->uobject &&
            !rdma_is_multicast_addr((struct in6_addr *)
                                    ah_attr->grh.dgid.raw) &&
            !rdma_link_local_addr((struct in6_addr *)
                                  ah_attr->grh.dgid.raw)) {
                union ib_gid sgid;

                rc = ib_get_cached_gid(&rdev->ibdev, 1,
                                       ah_attr->grh.sgid_index, &sgid,
                                       &sgid_attr);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to query gid at index %d",
                                ah_attr->grh.sgid_index);
                        goto fail;
                }
                if (sgid_attr.ndev && is_vlan_dev(sgid_attr.ndev))
                        vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
                /* Get network header type for this GID */
                nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
                switch (nw_type) {
                case RDMA_NETWORK_IPV4:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
                        break;
                case RDMA_NETWORK_IPV6:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
                        break;
                default:
                        ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
                        break;
                }
                rc = rdma_addr_find_l2_eth_by_grh(&sgid, &ah_attr->grh.dgid,
                                                  ah_attr->dmac, &vlan_tag,
                                                  &sgid_attr.ndev->ifindex,
                                                  NULL);
                /* Drop the ndev reference only after the L2 lookup above,
                 * which uses its ifindex.
                 */
                if (sgid_attr.ndev)
                        dev_put(sgid_attr.ndev);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
                        goto fail;
                }
        }

        memcpy(ah->qplib_ah.dmac, ah_attr->dmac, ETH_ALEN);
        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
                goto fail;
        }

        /* Write AVID to shared page. */
        if (ib_pd->uobject) {
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *uctx;
                unsigned long flag;
                u32 *wrptr;

                uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
                spin_lock_irqsave(&uctx->sh_lock, flag);
                wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
                *wrptr = ah->qplib_ah.id;
                wmb(); /* make sure cache is updated. */
                spin_unlock_irqrestore(&uctx->sh_lock, flag);
        }

        return &ah->ib_ah;

fail:
        kfree(ah);
        return ERR_PTR(rc);
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
{
        return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
{
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

        memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data,
               sizeof(union ib_gid));
        ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index;
        ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class;
        ah_attr->sl = ah->qplib_ah.sl;
        memcpy(ah_attr->dmac, ah->qplib_ah.dmac, ETH_ALEN);
        ah_attr->ah_flags = IB_AH_GRH;
        ah_attr->port_num = 1;
        ah_attr->static_rate = 0;
        return 0;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        int rc;

        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
                return rc;
        }
        if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
                rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
                                           &rdev->sqp_ah->qplib_ah);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy HW AH for shadow QP");
                        return rc;
                }

                rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                           &rdev->qp1_sqp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev),
                                "Failed to destroy Shadow QP");
                        return rc;
                }
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
                mutex_unlock(&rdev->qp_lock);

                kfree(rdev->sqp_ah);
                kfree(rdev->qp1_sqp);
        }

        if (!IS_ERR_OR_NULL(qp->rumem))
                ib_umem_release(qp->rumem);
        if (!IS_ERR_OR_NULL(qp->sumem))
                ib_umem_release(qp->sumem);

        mutex_lock(&rdev->qp_lock);
        list_del(&qp->list);
        atomic_dec(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        kfree(qp);
        return 0;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
        switch (type) {
        case IB_QPT_GSI:
                return CMDQ_CREATE_QP1_TYPE_GSI;
        case IB_QPT_RC:
                return CMDQ_CREATE_QP_TYPE_RC;
        case IB_QPT_UD:
                return CMDQ_CREATE_QP_TYPE_UD;
        default:
                return IB_QPT_MAX;
        }
}

static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
                                struct bnxt_re_qp *qp, struct ib_udata *udata)
{
        struct bnxt_re_qp_req ureq;
        struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
        struct ib_umem *umem;
        int bytes = 0;
        struct ib_ucontext *context = pd->ib_pd.uobject->context;
        struct bnxt_re_ucontext *cntx = container_of(context,
                                                     struct bnxt_re_ucontext,
                                                     ib_uctx);

        if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                return -EFAULT;

        bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
        /* Consider mapping PSN search memory only for RC QPs. */
        if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
                bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
        bytes = PAGE_ALIGN(bytes);
        umem = ib_umem_get(context, ureq.qpsva, bytes,
                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(umem))
                return PTR_ERR(umem);

        qp->sumem = umem;
        qplib_qp->sq.sglist = umem->sg_head.sgl;
        qplib_qp->sq.nmap = umem->nmap;
        qplib_qp->qp_handle = ureq.qp_handle;

        if (!qp->qplib_qp.srq) {
                bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
                bytes = PAGE_ALIGN(bytes);
                umem = ib_umem_get(context, ureq.qprva, bytes,
                                   IB_ACCESS_LOCAL_WRITE, 1);
                if (IS_ERR(umem))
                        goto rqfail;
                qp->rumem = umem;
                qplib_qp->rq.sglist = umem->sg_head.sgl;
                qplib_qp->rq.nmap = umem->nmap;
        }

        qplib_qp->dpi = cntx->dpi;
        return 0;
rqfail:
        ib_umem_release(qp->sumem);
        qp->sumem = NULL;
        qplib_qp->sq.sglist = NULL;
        qplib_qp->sq.nmap = 0;

        return PTR_ERR(umem);
}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_ah *ah;
        union ib_gid sgid;
        int rc;

        ah = kzalloc(sizeof(*ah), GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->rdev = rdev;
        ah->qplib_ah.pd = &pd->qplib_pd;

        rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
        if (rc)
                goto fail;

        /* Supply the same data for dgid as sgid */
        memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
               sizeof(union ib_gid));
        ah->qplib_ah.sgid_index = 0;

        ah->qplib_ah.traffic_class = 0;
        ah->qplib_ah.flow_label = 0;
        ah->qplib_ah.hop_limit = 1;
        ah->qplib_ah.sl = 0;
        /* Have DMAC same as SMAC */
        ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

        rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
        if (rc) {
                dev_err(rdev_to_dev(rdev),
                        "Failed to allocate HW AH for Shadow QP");
                goto fail;
        }

        return ah;

fail:
        kfree(ah);
        return NULL;
}

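/*
 * The shadow QP is a driver-owned UD QP created to handle QP1 (GSI)
 * traffic: it shares QP1's send and receive CQs, its SQ depth mirrors
 * QP1's RQ depth, and its AH (built above) loops back to the local GID
 * and MAC (dgid == sgid, dmac == smac).
 */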
static struct bnxt_re_qp *bnxt_re_create_shadow_qp
                                (struct bnxt_re_pd *pd,
                                 struct bnxt_qplib_res *qp1_res,
                                 struct bnxt_qplib_qp *qp1_qp)
{
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_re_qp *qp;
        int rc;

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return NULL;

        qp->rdev = rdev;

        /* Initialize the shadow QP structure from the QP1 values */
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_UD;

        qp->qplib_qp.max_inline_data = 0;
        qp->qplib_qp.sig_type = true;

        /* Shadow QP SQ depth must match QP1 RQ depth */
        qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.sq.max_sge = 2;

        qp->qplib_qp.scq = qp1_qp->scq;
        qp->qplib_qp.rcq = qp1_qp->rcq;

        qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
        qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;

        qp->qplib_qp.mtu = qp1_qp->mtu;

        qp->qplib_qp.sq_hdr_buf_size = 0;
        qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
        qp->qplib_qp.dpi = &rdev->dpi_privileged;

        rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
        if (rc)
                goto fail;

        rdev->sqp_id = qp->qplib_qp.id;

        spin_lock_init(&qp->sq_lock);
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);
        return qp;
fail:
        kfree(qp);
        return NULL;
}

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                                struct ib_qp_init_attr *qp_init_attr,
                                struct ib_udata *udata)
{
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        struct bnxt_re_qp *qp;
        struct bnxt_re_cq *cq;
        int rc, entries;

        if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
            (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
            (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
                return ERR_PTR(-EINVAL);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->rdev = rdev;
        ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
        qp->qplib_qp.pd = &pd->qplib_pd;
        qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
        qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
        if (qp->qplib_qp.type == IB_QPT_MAX) {
                dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
                        qp->qplib_qp.type);
                rc = -EINVAL;
                goto fail;
        }
        qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
        qp->qplib_qp.sig_type = (qp_init_attr->sq_sig_type ==
                                 IB_SIGNAL_ALL_WR);

        entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
        qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
                                        dev_attr->max_qp_wqes + 1);

        qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
        if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

        if (qp_init_attr->send_cq) {
                cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Send CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.scq = &cq->qplib_cq;
        }

        if (qp_init_attr->recv_cq) {
                cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
                                  ib_cq);
                if (!cq) {
                        dev_err(rdev_to_dev(rdev), "Receive CQ not found");
                        rc = -EINVAL;
                        goto fail;
                }
                qp->qplib_qp.rcq = &cq->qplib_cq;
        }

        if (qp_init_attr->srq) {
                dev_err(rdev_to_dev(rdev), "SRQ not supported");
                rc = -ENOTSUPP;
                goto fail;
        } else {
                /* Allocate 1 more than what's provided so posting max doesn't
                 * mean empty.
                 */
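                /* For example, max_recv_wr = 256 gives
                 * roundup_pow_of_two(257) = 512 queue entries, clamped
                 * to dev_attr->max_qp_wqes + 1.
                 */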
                entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
                qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
                                                dev_attr->max_qp_wqes + 1);

                qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
        }

        qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

        if (qp_init_attr->qp_type == IB_QPT_GSI) {
                qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
                qp->qplib_qp.sq.max_sge++;
                if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
                        qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

                qp->qplib_qp.rq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

                qp->qplib_qp.sq_hdr_buf_size =
                                        BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
                qp->qplib_qp.dpi = &rdev->dpi_privileged;
                rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
                        goto fail;
                }
                /* Create a shadow QP to handle the QP1 traffic */
                rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
                                                         &qp->qplib_qp);
                if (!rdev->qp1_sqp) {
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create Shadow QP for QP1");
                        goto qp_destroy;
                }
                rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
                                                           &qp->qplib_qp);
                if (!rdev->sqp_ah) {
                        bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                              &rdev->qp1_sqp->qplib_qp);
                        rc = -EINVAL;
                        dev_err(rdev_to_dev(rdev),
                                "Failed to create AH entry for ShadowQP");
                        goto qp_destroy;
                }

        } else {
                qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
                qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
                if (udata) {
                        rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
                        if (rc)
                                goto fail;
                } else {
                        qp->qplib_qp.dpi = &rdev->dpi_privileged;
                }

                rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
                        goto fail;
                }
        }

        qp->ib_qp.qp_num = qp->qplib_qp.id;
        spin_lock_init(&qp->sq_lock);

        if (udata) {
                struct bnxt_re_qp_resp resp;

                resp.qpid = qp->ib_qp.qp_num;
                resp.rsvd = 0;
                rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
                        goto qp_destroy;
                }
        }
        INIT_LIST_HEAD(&qp->list);
        mutex_lock(&rdev->qp_lock);
        list_add_tail(&qp->list, &rdev->qp_list);
        atomic_inc(&rdev->qp_count);
        mutex_unlock(&rdev->qp_lock);

        return &qp->ib_qp;
qp_destroy:
        bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
fail:
        kfree(qp);
        return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return CMDQ_MODIFY_QP_NEW_STATE_RESET;
        case IB_QPS_INIT:
                return CMDQ_MODIFY_QP_NEW_STATE_INIT;
        case IB_QPS_RTR:
                return CMDQ_MODIFY_QP_NEW_STATE_RTR;
        case IB_QPS_RTS:
                return CMDQ_MODIFY_QP_NEW_STATE_RTS;
        case IB_QPS_SQD:
                return CMDQ_MODIFY_QP_NEW_STATE_SQD;
        case IB_QPS_SQE:
                return CMDQ_MODIFY_QP_NEW_STATE_SQE;
        case IB_QPS_ERR:
        default:
                return CMDQ_MODIFY_QP_NEW_STATE_ERR;
        }
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
        switch (state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RESET:
                return IB_QPS_RESET;
        case CMDQ_MODIFY_QP_NEW_STATE_INIT:
                return IB_QPS_INIT;
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                return IB_QPS_RTR;
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                return IB_QPS_RTS;
        case CMDQ_MODIFY_QP_NEW_STATE_SQD:
                return IB_QPS_SQD;
        case CMDQ_MODIFY_QP_NEW_STATE_SQE:
                return IB_QPS_SQE;
        case CMDQ_MODIFY_QP_NEW_STATE_ERR:
        default:
                return IB_QPS_ERR;
        }
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
        case IB_MTU_512:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
        case IB_MTU_1024:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
        case IB_MTU_2048:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        case IB_MTU_4096:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
        default:
                return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
        }
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
        switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
                return IB_MTU_256;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
                return IB_MTU_512;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
                return IB_MTU_1024;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
                return IB_MTU_2048;
        case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
                return IB_MTU_4096;
        default:
                return IB_MTU_2048;
        }
}

static int __from_ib_access_flags(int iflags)
{
        int qflags = 0;

        if (iflags & IB_ACCESS_LOCAL_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        if (iflags & IB_ACCESS_REMOTE_READ)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
        if (iflags & IB_ACCESS_REMOTE_WRITE)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
        if (iflags & IB_ACCESS_REMOTE_ATOMIC)
                qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
        if (iflags & IB_ACCESS_MW_BIND)
                qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
        if (iflags & IB_ZERO_BASED)
                qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
        if (iflags & IB_ACCESS_ON_DEMAND)
                qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
        return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
        enum ib_access_flags iflags = 0;

        if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
                iflags |= IB_ACCESS_LOCAL_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
                iflags |= IB_ACCESS_REMOTE_WRITE;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
                iflags |= IB_ACCESS_REMOTE_READ;
        if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
                iflags |= IB_ACCESS_REMOTE_ATOMIC;
        if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
                iflags |= IB_ACCESS_MW_BIND;
        if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
                iflags |= IB_ZERO_BASED;
        if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
                iflags |= IB_ACCESS_ON_DEMAND;
        return iflags;
}

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
                                    struct bnxt_re_qp *qp1_qp,
                                    int qp_attr_mask)
{
        struct bnxt_re_qp *qp = rdev->qp1_sqp;
        int rc = 0;

        if (qp_attr_mask & IB_QP_STATE) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
                qp->qplib_qp.state = qp1_qp->qplib_qp.state;
        }
        if (qp_attr_mask & IB_QP_PKEY_INDEX) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
                qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
        }

        if (qp_attr_mask & IB_QP_QKEY) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
                /* Use a random QKEY */
                qp->qplib_qp.qkey = 0x81818181;
        }
        if (qp_attr_mask & IB_QP_SQ_PSN) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
                qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
        }

        rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to modify Shadow QP for QP1");
        return rc;
}

int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                      int qp_attr_mask, struct ib_udata *udata)
{
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
        enum ib_qp_state curr_qp_state, new_qp_state;
        int rc, entries;
        int status;
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;
        u8 nw_type;

        qp->qplib_qp.modify_flags = 0;
        if (qp_attr_mask & IB_QP_STATE) {
                curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
                new_qp_state = qp_attr->qp_state;
                if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
                                        ib_qp->qp_type, qp_attr_mask,
                                        IB_LINK_LAYER_ETHERNET)) {
                        dev_err(rdev_to_dev(rdev),
                                "Invalid attribute mask: %#x specified ",
                                qp_attr_mask);
                        dev_err(rdev_to_dev(rdev),
                                "for qpn: %#x type: %#x",
                                ib_qp->qp_num, ib_qp->qp_type);
                        dev_err(rdev_to_dev(rdev),
                                "curr_qp_state=0x%x, new_qp_state=0x%x\n",
                                curr_qp_state, new_qp_state);
                        return -EINVAL;
                }
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
                qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
        }
        if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
                qp->qplib_qp.modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
                qp->qplib_qp.en_sqd_async_notify = true;
        }
        if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
                qp->qplib_qp.access =
                        __from_ib_access_flags(qp_attr->qp_access_flags);
                /* LOCAL_WRITE access must be set to allow RC receive */
                qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
        }
        if (qp_attr_mask & IB_QP_PKEY_INDEX) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
                qp->qplib_qp.pkey_index = qp_attr->pkey_index;
        }
        if (qp_attr_mask & IB_QP_QKEY) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
                qp->qplib_qp.qkey = qp_attr->qkey;
        }
        if (qp_attr_mask & IB_QP_AV) {
                qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
                                     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
                memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw,
                       sizeof(qp->qplib_qp.ah.dgid.data));
                qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label;
                /* If RoCE v2 is enabled, the stack holds two entries per GID,
                 * one per RoCE version, while HW stores the GID once. Divide
                 * the GID index by 2 to get the HW index for RoCE v2.
                 */
                qp->qplib_qp.ah.sgid_index =
                                        qp_attr->ah_attr.grh.sgid_index / 2;
                qp->qplib_qp.ah.host_sgid_index =
                                        qp_attr->ah_attr.grh.sgid_index;
                qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit;
                qp->qplib_qp.ah.traffic_class =
                                        qp_attr->ah_attr.grh.traffic_class;
                qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl;
                ether_addr_copy(qp->qplib_qp.ah.dmac, qp_attr->ah_attr.dmac);

                status = ib_get_cached_gid(&rdev->ibdev, 1,
                                           qp_attr->ah_attr.grh.sgid_index,
                                           &sgid, &sgid_attr);
                if (!status && sgid_attr.ndev) {
                        memcpy(qp->qplib_qp.smac, sgid_attr.ndev->dev_addr,
                               ETH_ALEN);
                        dev_put(sgid_attr.ndev);
                        nw_type = ib_gid_to_network_type(sgid_attr.gid_type,
                                                         &sgid);
                        switch (nw_type) {
                        case RDMA_NETWORK_IPV4:
                                qp->qplib_qp.nw_type =
                                        CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
                                break;
                        case RDMA_NETWORK_IPV6:
                                qp->qplib_qp.nw_type =
                                        CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
                                break;
                        default:
                                qp->qplib_qp.nw_type =
                                        CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
                                break;
                        }
                }
        }

1311         if (qp_attr_mask & IB_QP_PATH_MTU) {
1312                 qp->qplib_qp.modify_flags |=
1313                                 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1314                 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1315         } else if (qp_attr->qp_state == IB_QPS_RTR) {
1316                 qp->qplib_qp.modify_flags |=
1317                         CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1318                 qp->qplib_qp.path_mtu =
1319                         __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1320         }
1321
1322         if (qp_attr_mask & IB_QP_TIMEOUT) {
1323                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1324                 qp->qplib_qp.timeout = qp_attr->timeout;
1325         }
1326         if (qp_attr_mask & IB_QP_RETRY_CNT) {
1327                 qp->qplib_qp.modify_flags |=
1328                                 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1329                 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1330         }
1331         if (qp_attr_mask & IB_QP_RNR_RETRY) {
1332                 qp->qplib_qp.modify_flags |=
1333                                 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1334                 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1335         }
1336         if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1337                 qp->qplib_qp.modify_flags |=
1338                                 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1339                 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1340         }
1341         if (qp_attr_mask & IB_QP_RQ_PSN) {
1342                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1343                 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1344         }
1345         if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1346                 qp->qplib_qp.modify_flags |=
1347                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1348                 qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
1349         }
1350         if (qp_attr_mask & IB_QP_SQ_PSN) {
1351                 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1352                 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1353         }
1354         if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1355                 qp->qplib_qp.modify_flags |=
1356                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1357                 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1358         }
1359         if (qp_attr_mask & IB_QP_CAP) {
1360                 qp->qplib_qp.modify_flags |=
1361                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1362                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1363                                 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1364                                 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1365                                 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1366                 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1367                     (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1368                     (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1369                     (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1370                     (qp_attr->cap.max_inline_data >=
1371                                                 dev_attr->max_inline_data)) {
1372                         dev_err(rdev_to_dev(rdev),
1373                 "Modify QP failed - max exceeded");
1374                         return -EINVAL;
1375                 }
1376                 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1377                 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1378                                                 dev_attr->max_qp_wqes + 1);
1379                 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1380                 if (qp->qplib_qp.rq.max_wqe) {
1381                         entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1382                         qp->qplib_qp.rq.max_wqe =
1383                                 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1384                         qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1385                 } else {
1386                         /* SRQ was used prior, just ignore the RQ caps */
1387                 }
1388         }
1389         if (qp_attr_mask & IB_QP_DEST_QPN) {
1390                 qp->qplib_qp.modify_flags |=
1391                                 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1392                 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1393         }
1394         rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1395         if (rc) {
1396                 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1397                 return rc;
1398         }
1399         if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1400                 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1401         return rc;
1402 }
1403
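/*
 * bnxt_re_query_qp - read the current QP state back from hardware and
 * translate it into ib_qp_attr/ib_qp_init_attr form. Note that the
 * SQ/RQ capabilities are reported from the driver's cached qplib_qp
 * copy rather than from the firmware response.
 */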
1404 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1405                      int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1406 {
1407         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1408         struct bnxt_re_dev *rdev = qp->rdev;
1409         struct bnxt_qplib_qp qplib_qp;
1410         int rc;
1411
1412         memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
1413         qplib_qp.id = qp->qplib_qp.id;
1414         qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1415
1416         rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
1417         if (rc) {
1418                 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1419                 return rc;
1420         }
1421         qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
1422         qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
1423         qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
1424         qp_attr->pkey_index = qplib_qp.pkey_index;
1425         qp_attr->qkey = qplib_qp.qkey;
1426         memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp.ah.dgid.data,
1427                sizeof(qplib_qp.ah.dgid.data));
1428         qp_attr->ah_attr.grh.flow_label = qplib_qp.ah.flow_label;
1429         qp_attr->ah_attr.grh.sgid_index = qplib_qp.ah.host_sgid_index;
1430         qp_attr->ah_attr.grh.hop_limit = qplib_qp.ah.hop_limit;
1431         qp_attr->ah_attr.grh.traffic_class = qplib_qp.ah.traffic_class;
1432         qp_attr->ah_attr.sl = qplib_qp.ah.sl;
1433         ether_addr_copy(qp_attr->ah_attr.dmac, qplib_qp.ah.dmac);
1434         qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
1435         qp_attr->timeout = qplib_qp.timeout;
1436         qp_attr->retry_cnt = qplib_qp.retry_cnt;
1437         qp_attr->rnr_retry = qplib_qp.rnr_retry;
1438         qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
1439         qp_attr->rq_psn = qplib_qp.rq.psn;
1440         qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
1441         qp_attr->sq_psn = qplib_qp.sq.psn;
1442         qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
1443         qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
1444                                                         IB_SIGNAL_REQ_WR;
1445         qp_attr->dest_qp_num = qplib_qp.dest_qpn;
1446
1447         qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1448         qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1449         qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1450         qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1451         qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1452         qp_init_attr->cap = qp_attr->cap;
1453
1454         return 0;
1455 }
1456
1457 /* Routine for sending QP1 packets for RoCE V1 and V2
1458  */
1459 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1460                                      struct ib_send_wr *wr,
1461                                      struct bnxt_qplib_swqe *wqe,
1462                                      int payload_size)
1463 {
1464         struct ib_device *ibdev = &qp->rdev->ibdev;
1465         struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1466                                              ib_ah);
1467         struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1468         struct bnxt_qplib_sge sge;
1469         union ib_gid sgid;
1470         u8 nw_type;
1471         u16 ether_type;
1472         struct ib_gid_attr sgid_attr;
1473         union ib_gid dgid;
1474         bool is_eth = false;
1475         bool is_vlan = false;
1476         bool is_grh = false;
1477         bool is_udp = false;
1478         u8 ip_version = 0;
1479         u16 vlan_id = 0xFFFF;
1480         void *buf;
1481         int i, rc = 0, size;
1482
1483         memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1484
1485         rc = ib_get_cached_gid(ibdev, 1,
1486                                qplib_ah->host_sgid_index, &sgid,
1487                                &sgid_attr);
1488         if (rc) {
1489                 dev_err(rdev_to_dev(qp->rdev),
1490                         "Failed to query gid at index %d",
1491                         qplib_ah->host_sgid_index);
1492                 return rc;
1493         }
1494         if (sgid_attr.ndev) {
1495                 if (is_vlan_dev(sgid_attr.ndev))
1496                         vlan_id = vlan_dev_vlan_id(sgid_attr.ndev);
1497                 dev_put(sgid_attr.ndev);
1498         }
1499         /* Get network header type for this GID */
1500         nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1501         switch (nw_type) {
1502         case RDMA_NETWORK_IPV4:
1503                 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1504                 break;
1505         case RDMA_NETWORK_IPV6:
1506                 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1507                 break;
1508         default:
1509                 nw_type = BNXT_RE_ROCE_V1_PACKET;
1510                 break;
1511         }
1512         memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1513         is_udp = sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1514         if (is_udp) {
1515                 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
1516                         ip_version = 4;
1517                         ether_type = ETH_P_IP;
1518                 } else {
1519                         ip_version = 6;
1520                         ether_type = ETH_P_IPV6;
1521                 }
1522                 is_grh = false;
1523         } else {
1524                 ether_type = ETH_P_IBOE;
1525                 is_grh = true;
1526         }
1527
1528         is_eth = true;
1529         is_vlan = vlan_id && (vlan_id < 0x1000);
1530
1531         ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1532                           ip_version, is_udp, 0, &qp->qp1_hdr);
1533
1534         /* ETH */
1535         ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1536         ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1537
1538         /* For vlan, check the sgid for vlan existence */
1539
1540         if (!is_vlan) {
1541                 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1542         } else {
1543                 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1544                 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1545         }
1546
1547         if (is_grh || (ip_version == 6)) {
1548                 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
1549                 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1550                        sizeof(sgid));
1551                 qp->qp1_hdr.grh.hop_limit     = qplib_ah->hop_limit;
1552         }
1553
1554         if (ip_version == 4) {
1555                 qp->qp1_hdr.ip4.tos = 0;
1556                 qp->qp1_hdr.ip4.id = 0;
1557                 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1558                 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1559
1560                 memcpy(&qp->qp1_hdr.ip4.saddr, sgid.raw + 12, 4);
1561                 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1562                 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1563         }
1564
1565         if (is_udp) {
1566                 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1567                 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1568                 qp->qp1_hdr.udp.csum = 0;
1569         }
1570
1571         /* BTH */
1572         if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1573                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1574                 qp->qp1_hdr.immediate_present = 1;
1575         } else {
1576                 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1577         }
1578         if (wr->send_flags & IB_SEND_SOLICITED)
1579                 qp->qp1_hdr.bth.solicited_event = 1;
1580         /* pad_count: pad the payload out to a 4-byte boundary */
1581         qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1582
1583         /* P_key for QP1 is for all members */
1584         qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1585         qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1586         qp->qp1_hdr.bth.ack_req = 0;
1587         qp->send_psn++;
1588         qp->send_psn &= BTH_PSN_MASK;
1589         qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1590         /* DETH */
1591         /* Use the privileged Q_Key for QP1 */
1592         qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
1593         qp->qp1_hdr.deth.source_qpn = IB_QP1;
1594
1595         /* Pack the QP1 to the transmit buffer */
1596         buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
1597         if (buf) {
1598                 size = ib_ud_header_pack(&qp->qp1_hdr, buf);
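                /* Shift every SGE up by one; sg_list[0] is about to be
                 * repointed at the packed QP1 header built above.
                 */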
1599                 for (i = wqe->num_sge; i; i--) {
1600                         wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
1601                         wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
1602                         wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
1603                 }
1604
1605                 /*
1606                  * Max header buf size for IPv6 RoCEv2 is 86,
1607                  * which is the same as the QP1 SQ header buffer.
1608                  * Header buf size for IPv4 RoCEv2 can be 66:
1609                  * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
1610                  * Subtract 20 bytes from the QP1 SQ header buf size.
1611                  */
1612                 if (is_udp && ip_version == 4)
1613                         sge.size -= 20;
1614                 /*
1615                  * Max Header buf size for RoCE V1 is 78.
1616                  * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
1617                  * Subtract 8 bytes from QP1 SQ header buf size
1618                  */
1619                 if (!is_udp)
1620                         sge.size -= 8;
1621
1622                 /* Subtract 4 bytes for non vlan packets */
1623                 if (!is_vlan)
1624                         sge.size -= 4;
1625
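                /*
                 * Net effect of the adjustments above: with a VLAN tag
                 * the header SGE is 86 bytes for RoCEv2/IPv6, 66 for
                 * RoCEv2/IPv4 (86 - 20) and 78 for RoCEv1 (86 - 8);
                 * subtract 4 more when the packet is untagged.
                 */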
1626                 wqe->sg_list[0].addr = sge.addr;
1627                 wqe->sg_list[0].lkey = sge.lkey;
1628                 wqe->sg_list[0].size = sge.size;
1629                 wqe->num_sge++;
1630
1631         } else {
1632                 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
1633                 rc = -ENOMEM;
1634         }
1635         return rc;
1636 }
1637
1638 /* For the MAD layer, it only provides the recv SGE the size of
1639  * ib_grh + MAD datagram.  No Ethernet headers, Ethertype, BTH, DETH,
1640  * nor RoCE iCRC.  The Cu+ solution must provide buffer for the entire
1641  * receive packet (334 bytes) with no VLAN and then copy the GRH
1642  * and the MAD datagram out to the provided SGE.
1643  */
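/*
 * To do that, sg_list[0] is repointed at the driver-owned QP1 header
 * buffer so the raw Ethernet packet lands there, the consumer's original
 * SGE is saved in sqp_tbl[], and wr_id is remapped to the table index so
 * the original SGE and wr_id can be restored at completion time.
 */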
1644 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
1645                                             struct ib_recv_wr *wr,
1646                                             struct bnxt_qplib_swqe *wqe,
1647                                             int payload_size)
1648 {
1649         struct bnxt_qplib_sge ref, sge;
1650         u32 rq_prod_index;
1651         struct bnxt_re_sqp_entries *sqp_entry;
1652
1653         rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
1654
1655         if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
1656                 return -ENOMEM;
1657
1658         /* Create 1 SGE to receive the entire
1659          * ethernet packet
1660          */
1661         /* Save the reference from ULP */
1662         ref.addr = wqe->sg_list[0].addr;
1663         ref.lkey = wqe->sg_list[0].lkey;
1664         ref.size = wqe->sg_list[0].size;
1665
1666         sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
1667
1668         /* SGE 1 */
1669         wqe->sg_list[0].addr = sge.addr;
1670         wqe->sg_list[0].lkey = sge.lkey;
1671         wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1672         sge.size -= wqe->sg_list[0].size;
1673
1674         sqp_entry->sge.addr = ref.addr;
1675         sqp_entry->sge.lkey = ref.lkey;
1676         sqp_entry->sge.size = ref.size;
1677         /* Store the wrid for reporting completion */
1678         sqp_entry->wrid = wqe->wr_id;
1679                 /* Change wqe->wr_id to the table index */
1680         wqe->wr_id = rq_prod_index;
1681         return 0;
1682 }
1683
1684 static int is_ud_qp(struct bnxt_re_qp *qp)
1685 {
1686         return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
1687 }
1688
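/*
 * The WQE builders below share a common pattern: translate the IB
 * opcode into a BNXT_QPLIB_SWQE_TYPE_* and map the generic IB_SEND_*
 * flags (SIGNALED, FENCE, SOLICITED, INLINE) onto the corresponding
 * BNXT_QPLIB_SWQE_FLAGS_* bits.
 */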
1689 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
1690                                   struct ib_send_wr *wr,
1691                                   struct bnxt_qplib_swqe *wqe)
1692 {
1693         struct bnxt_re_ah *ah = NULL;
1694
1695         if (is_ud_qp(qp)) {
1696                 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
1697                 wqe->send.q_key = ud_wr(wr)->remote_qkey;
1698                 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
1699                 wqe->send.avid = ah->qplib_ah.id;
1700         }
1701         switch (wr->opcode) {
1702         case IB_WR_SEND:
1703                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
1704                 break;
1705         case IB_WR_SEND_WITH_IMM:
1706                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
1707                 wqe->send.imm_data = wr->ex.imm_data;
1708                 break;
1709         case IB_WR_SEND_WITH_INV:
1710                 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
1711                 wqe->send.inv_key = wr->ex.invalidate_rkey;
1712                 break;
1713         default:
1714                 return -EINVAL;
1715         }
1716         if (wr->send_flags & IB_SEND_SIGNALED)
1717                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1718         if (wr->send_flags & IB_SEND_FENCE)
1719                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1720         if (wr->send_flags & IB_SEND_SOLICITED)
1721                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1722         if (wr->send_flags & IB_SEND_INLINE)
1723                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1724
1725         return 0;
1726 }
1727
1728 static int bnxt_re_build_rdma_wqe(struct ib_send_wr *wr,
1729                                   struct bnxt_qplib_swqe *wqe)
1730 {
1731         switch (wr->opcode) {
1732         case IB_WR_RDMA_WRITE:
1733                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
1734                 break;
1735         case IB_WR_RDMA_WRITE_WITH_IMM:
1736                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
1737                 wqe->rdma.imm_data = wr->ex.imm_data;
1738                 break;
1739         case IB_WR_RDMA_READ:
1740                 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
1741                 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
1742                 break;
1743         default:
1744                 return -EINVAL;
1745         }
1746         wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
1747         wqe->rdma.r_key = rdma_wr(wr)->rkey;
1748         if (wr->send_flags & IB_SEND_SIGNALED)
1749                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1750         if (wr->send_flags & IB_SEND_FENCE)
1751                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1752         if (wr->send_flags & IB_SEND_SOLICITED)
1753                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1754         if (wr->send_flags & IB_SEND_INLINE)
1755                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
1756
1757         return 0;
1758 }
1759
1760 static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1761                                     struct bnxt_qplib_swqe *wqe)
1762 {
1763         switch (wr->opcode) {
1764         case IB_WR_ATOMIC_CMP_AND_SWP:
1765                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1766                 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1767                 break;
1768         case IB_WR_ATOMIC_FETCH_AND_ADD:
1769                 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
1770                 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1771                 break;
1772         default:
1773                 return -EINVAL;
1774         }
1775         wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
1776         wqe->atomic.r_key = atomic_wr(wr)->rkey;
1777         if (wr->send_flags & IB_SEND_SIGNALED)
1778                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1779         if (wr->send_flags & IB_SEND_FENCE)
1780                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1781         if (wr->send_flags & IB_SEND_SOLICITED)
1782                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1783         return 0;
1784 }
1785
1786 static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
1787                                  struct bnxt_qplib_swqe *wqe)
1788 {
1789         wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
1790         wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
1791
1792         if (wr->send_flags & IB_SEND_SIGNALED)
1793                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1794         if (wr->send_flags & IB_SEND_FENCE)
1795                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1796         if (wr->send_flags & IB_SEND_SOLICITED)
1797                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
1798
1799         return 0;
1800 }
1801
1802 static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
1803                                  struct bnxt_qplib_swqe *wqe)
1804 {
1805         struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
1806         struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
1807         int access = wr->access;
1808
1809         wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
1810         wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
1811         wqe->frmr.page_list = mr->pages;
1812         wqe->frmr.page_list_len = mr->npages;
1813         wqe->frmr.levels = qplib_frpl->hwq.level + 1;
1814         wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
1815
1816         if (wr->wr.send_flags & IB_SEND_FENCE)
1817                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
1818         if (wr->wr.send_flags & IB_SEND_SIGNALED)
1819                 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
1820
1821         if (access & IB_ACCESS_LOCAL_WRITE)
1822                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1823         if (access & IB_ACCESS_REMOTE_READ)
1824                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
1825         if (access & IB_ACCESS_REMOTE_WRITE)
1826                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
1827         if (access & IB_ACCESS_REMOTE_ATOMIC)
1828                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
1829         if (access & IB_ACCESS_MW_BIND)
1830                 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
1831
1832         wqe->frmr.l_key = wr->key;
1833         wqe->frmr.length = wr->mr->length;
1834         wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
1835         wqe->frmr.va = wr->mr->iova;
1836         return 0;
1837 }
1838
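/*
 * Gather the WR's scatter list into the WQE's inline data area,
 * appending each SGE after the previous one; fails if the total would
 * exceed BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */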
1839 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
1840                                     struct ib_send_wr *wr,
1841                                     struct bnxt_qplib_swqe *wqe)
1842 {
1843         /*  Copy the inline data to the data  field */
1844         u8 *in_data;
1845         u32 i, sge_len;
1846         void *sge_addr;
1847
1848         in_data = wqe->inline_data;
1849         for (i = 0; i < wr->num_sge; i++) {
1850                 sge_addr = (void *)(unsigned long)
1851                                 wr->sg_list[i].addr;
1852                 sge_len = wr->sg_list[i].length;
1853
1854                 if ((sge_len + wqe->inline_len) >
1855                     BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1856                         dev_err(rdev_to_dev(rdev),
1857                                 "Inline data size requested > supported value");
1858                         return -EINVAL;
1859                 }
1861
1862                 memcpy(in_data, sge_addr, sge_len);
1863                 in_data += sge_len;
1864                 wqe->inline_len += sge_len;
1865         }
1866         return wqe->inline_len;
1867 }
1868
1869 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
1870                                    struct ib_send_wr *wr,
1871                                    struct bnxt_qplib_swqe *wqe)
1872 {
1873         int payload_sz = 0;
1874
1875         if (wr->send_flags & IB_SEND_INLINE)
1876                 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
1877         else
1878                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
1879                                                wqe->num_sge);
1880
1881         return payload_sz;
1882 }
1883
1884 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
1885                                        struct bnxt_re_qp *qp,
1886                                 struct ib_send_wr *wr)
1887 {
1888         struct bnxt_qplib_swqe wqe;
1889         int rc = 0, payload_sz = 0;
1890         unsigned long flags;
1891
1892         spin_lock_irqsave(&qp->sq_lock, flags);
1894         while (wr) {
1895                 /* Housekeeping */
1896                 memset(&wqe, 0, sizeof(wqe));
1897
1898                 /* Common */
1899                 wqe.num_sge = wr->num_sge;
1900                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
1901                         dev_err(rdev_to_dev(rdev),
1902                                 "Limit exceeded for Send SGEs");
1903                         rc = -EINVAL;
1904                         goto bad;
1905                 }
1906
1907                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
1908                 if (payload_sz < 0) {
1909                         rc = -EINVAL;
1910                         goto bad;
1911                 }
1912                 wqe.wr_id = wr->wr_id;
1913
1914                 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
1915
1916                 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
1917                 if (!rc)
1918                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
1919 bad:
1920                 if (rc) {
1921                         dev_err(rdev_to_dev(rdev),
1922                                 "Post send failed opcode = %#x rc = %d",
1923                                 wr->opcode, rc);
1924                         break;
1925                 }
1926                 wr = wr->next;
1927         }
1928         bnxt_qplib_post_send_db(&qp->qplib_qp);
1929         spin_unlock_irqrestore(&qp->sq_lock, flags);
1930         return rc;
1931 }
1932
1933 int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
1934                       struct ib_send_wr **bad_wr)
1935 {
1936         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1937         struct bnxt_qplib_swqe wqe;
1938         int rc = 0, payload_sz = 0;
1939         unsigned long flags;
1940
1941         spin_lock_irqsave(&qp->sq_lock, flags);
1942         while (wr) {
1943                 /* Housekeeping */
1944                 memset(&wqe, 0, sizeof(wqe));
1945
1946                 /* Common */
1947                 wqe.num_sge = wr->num_sge;
1948                 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
1949                         dev_err(rdev_to_dev(qp->rdev),
1950                                 "Limit exceeded for Send SGEs");
1951                         rc = -EINVAL;
1952                         goto bad;
1953                 }
1954
1955                 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
1956                 if (payload_sz < 0) {
1957                         rc = -EINVAL;
1958                         goto bad;
1959                 }
1960                 wqe.wr_id = wr->wr_id;
1961
1962                 switch (wr->opcode) {
1963                 case IB_WR_SEND:
1964                 case IB_WR_SEND_WITH_IMM:
1965                         if (ib_qp->qp_type == IB_QPT_GSI) {
1966                                 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
1967                                                                payload_sz);
1968                                 if (rc)
1969                                         goto bad;
1970                                 wqe.rawqp1.lflags |=
1971                                         SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
1972                         }
1973                         /* send_flags is a bitmask, so test the
1974                          * IP_CSUM bit instead of switching on the
1975                          * whole mask.
1976                          */
1977                         if (wr->send_flags & IB_SEND_IP_CSUM)
1978                                 wqe.rawqp1.lflags |=
1979                                         SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
1981                         /* Fall through to build the wqe */
1982                 case IB_WR_SEND_WITH_INV:
1983                         rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
1984                         break;
1985                 case IB_WR_RDMA_WRITE:
1986                 case IB_WR_RDMA_WRITE_WITH_IMM:
1987                 case IB_WR_RDMA_READ:
1988                         rc = bnxt_re_build_rdma_wqe(wr, &wqe);
1989                         break;
1990                 case IB_WR_ATOMIC_CMP_AND_SWP:
1991                 case IB_WR_ATOMIC_FETCH_AND_ADD:
1992                         rc = bnxt_re_build_atomic_wqe(wr, &wqe);
1993                         break;
1994                 case IB_WR_RDMA_READ_WITH_INV:
1995                         dev_err(rdev_to_dev(qp->rdev),
1996                                 "RDMA Read with Invalidate is not supported");
1997                         rc = -EINVAL;
1998                         goto bad;
1999                 case IB_WR_LOCAL_INV:
2000                         rc = bnxt_re_build_inv_wqe(wr, &wqe);
2001                         break;
2002                 case IB_WR_REG_MR:
2003                         rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2004                         break;
2005                 default:
2006                         /* Unsupported WRs */
2007                         dev_err(rdev_to_dev(qp->rdev),
2008                                 "WR (%#x) is not supported", wr->opcode);
2009                         rc = -EINVAL;
2010                         goto bad;
2011                 }
2012                 if (!rc)
2013                         rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2014 bad:
2015                 if (rc) {
2016                         dev_err(rdev_to_dev(qp->rdev),
2017                                 "post_send failed op:%#x qps = %#x rc = %d\n",
2018                                 wr->opcode, qp->qplib_qp.state, rc);
2019                         *bad_wr = wr;
2020                         break;
2021                 }
2022                 wr = wr->next;
2023         }
2024         bnxt_qplib_post_send_db(&qp->qplib_qp);
2025         spin_unlock_irqrestore(&qp->sq_lock, flags);
2026
2027         return rc;
2028 }
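/*
 * Illustrative only, not part of the driver: a ULP reaches the verb
 * above through the core ib_post_send() wrapper. The names dma_addr,
 * len, mr and cookie below are placeholders for caller-owned state.
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,     (a DMA-mapped buffer)
 *              .length = len,
 *              .lkey   = mr->lkey,
 *      };
 *      struct ib_send_wr wr = {
 *              .wr_id      = cookie,
 *              .sg_list    = &sge,
 *              .num_sge    = 1,
 *              .opcode     = IB_WR_SEND,
 *              .send_flags = IB_SEND_SIGNALED,
 *      }, *bad_wr;
 *
 *      if (ib_post_send(qp, &wr, &bad_wr))
 *              ...on failure, bad_wr points at the first WR not posted.
 */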
2029
2030 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2031                                        struct bnxt_re_qp *qp,
2032                                        struct ib_recv_wr *wr)
2033 {
2034         struct bnxt_qplib_swqe wqe;
2035         int rc = 0, payload_sz = 0;
2036
2038         while (wr) {
2039                 /* Housekeeping */
2040                 memset(&wqe, 0, sizeof(wqe));
2041
2042                 /* Common */
2043                 wqe.num_sge = wr->num_sge;
2044                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2045                         dev_err(rdev_to_dev(rdev),
2046                                 "Limit exceeded for Receive SGEs");
2047                         rc = -EINVAL;
2048                         break;
2049                 }
2050                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2051                                                wr->num_sge);
2052                 wqe.wr_id = wr->wr_id;
2053                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2054
2055                 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2056                 if (rc)
2057                         break;
2058
2059                 wr = wr->next;
2060         }
2061         if (!rc)
2062                 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2063         return rc;
2064 }
2065
2066 int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
2067                       struct ib_recv_wr **bad_wr)
2068 {
2069         struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2070         struct bnxt_qplib_swqe wqe;
2071         int rc = 0, payload_sz = 0;
2072
2073         while (wr) {
2074                 /* Housekeeping */
2075                 memset(&wqe, 0, sizeof(wqe));
2076
2077                 /* Common */
2078                 wqe.num_sge = wr->num_sge;
2079                 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2080                         dev_err(rdev_to_dev(qp->rdev),
2081                                 "Limit exceeded for Receive SGEs");
2082                         rc = -EINVAL;
2083                         *bad_wr = wr;
2084                         break;
2085                 }
2086
2087                 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2088                                                wr->num_sge);
2089                 wqe.wr_id = wr->wr_id;
2090                 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2091
2092                 if (ib_qp->qp_type == IB_QPT_GSI)
2093                         rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2094                                                               payload_sz);
2095                 if (!rc)
2096                         rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2097                 if (rc) {
2098                         *bad_wr = wr;
2099                         break;
2100                 }
2101                 wr = wr->next;
2102         }
2103         bnxt_qplib_post_recv_db(&qp->qplib_qp);
2104         return rc;
2105 }
2106
2107 /* Completion Queues */
2108 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2109 {
2110         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2111         struct bnxt_re_dev *rdev = cq->rdev;
2112         int rc;
2113
2114         rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2115         if (rc) {
2116                 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2117                 return rc;
2118         }
2119         if (!IS_ERR_OR_NULL(cq->umem))
2120                 ib_umem_release(cq->umem);
2121
2122         /* cq was dereferenced above, so no NULL check is needed here */
2123         kfree(cq->cql);
2124         kfree(cq);
2126         atomic_dec(&rdev->cq_count);
2127         rdev->nq.budget--;
2128         return 0;
2129 }
2130
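/*
 * CQ creation takes one of two paths: with a user context the CQE ring
 * is pinned from user memory via ib_umem_get(); for a kernel consumer
 * the CQ is backed by the privileged DPI and a poll staging array (cql)
 * is allocated for bnxt_re_poll_cq().
 */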
2131 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2132                                 const struct ib_cq_init_attr *attr,
2133                                 struct ib_ucontext *context,
2134                                 struct ib_udata *udata)
2135 {
2136         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2137         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2138         struct bnxt_re_cq *cq = NULL;
2139         int rc, entries;
2140         int cqe = attr->cqe;
2141
2142         /* Validate CQ fields */
2143         if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2144                 dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
2145                 return ERR_PTR(-EINVAL);
2146         }
2147         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2148         if (!cq)
2149                 return ERR_PTR(-ENOMEM);
2150
2151         cq->rdev = rdev;
2152         cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2153
2154         entries = roundup_pow_of_two(cqe + 1);
2155         if (entries > dev_attr->max_cq_wqes + 1)
2156                 entries = dev_attr->max_cq_wqes + 1;
2157
2158         if (context) {
2159                 struct bnxt_re_cq_req req;
2160                 struct bnxt_re_ucontext *uctx = container_of
2161                                                 (context,
2162                                                  struct bnxt_re_ucontext,
2163                                                  ib_uctx);
2164                 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2165                         rc = -EFAULT;
2166                         goto fail;
2167                 }
2168
2169                 cq->umem = ib_umem_get(context, req.cq_va,
2170                                        entries * sizeof(struct cq_base),
2171                                        IB_ACCESS_LOCAL_WRITE, 1);
2172                 if (IS_ERR(cq->umem)) {
2173                         rc = PTR_ERR(cq->umem);
2174                         goto fail;
2175                 }
2176                 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2177                 cq->qplib_cq.nmap = cq->umem->nmap;
2178                 cq->qplib_cq.dpi = uctx->dpi;
2179         } else {
2180                 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2181                 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2182                                   GFP_KERNEL);
2183                 if (!cq->cql) {
2184                         rc = -ENOMEM;
2185                         goto fail;
2186                 }
2187
2188                 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2189                 cq->qplib_cq.sghead = NULL;
2190                 cq->qplib_cq.nmap = 0;
2191         }
2192         cq->qplib_cq.max_wqe = entries;
2193         cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
2194
2195         rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2196         if (rc) {
2197                 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2198                 goto fail;
2199         }
2200
2201         cq->ib_cq.cqe = entries;
2202         cq->cq_period = cq->qplib_cq.period;
2203         rdev->nq.budget++;
2204
2205         atomic_inc(&rdev->cq_count);
2206
2207         if (context) {
2208                 struct bnxt_re_cq_resp resp;
2209
2210                 resp.cqid = cq->qplib_cq.id;
2211                 resp.tail = cq->qplib_cq.hwq.cons;
2212                 resp.phase = cq->qplib_cq.period;
2213                 resp.rsvd = 0;
2214                 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2215                 if (rc) {
2216                         dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2217                         bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2218                         goto c2fail;
2219                 }
2220         }
2221
2222         return &cq->ib_cq;
2223
2224 c2fail:
2225         if (context)
2226                 ib_umem_release(cq->umem);
2227 fail:
2228         kfree(cq->cql);
2229         kfree(cq);
2230         return ERR_PTR(rc);
2231 }
2232
2233 static u8 __req_to_ib_wc_status(u8 qstatus)
2234 {
2235         switch (qstatus) {
2236         case CQ_REQ_STATUS_OK:
2237                 return IB_WC_SUCCESS;
2238         case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2239                 return IB_WC_BAD_RESP_ERR;
2240         case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2241                 return IB_WC_LOC_LEN_ERR;
2242         case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2243                 return IB_WC_LOC_QP_OP_ERR;
2244         case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2245                 return IB_WC_LOC_PROT_ERR;
2246         case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2247                 return IB_WC_GENERAL_ERR;
2248         case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2249                 return IB_WC_REM_INV_REQ_ERR;
2250         case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2251                 return IB_WC_REM_ACCESS_ERR;
2252         case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2253                 return IB_WC_REM_OP_ERR;
2254         case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2255                 return IB_WC_RNR_RETRY_EXC_ERR;
2256         case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2257                 return IB_WC_RETRY_EXC_ERR;
2258         case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2259                 return IB_WC_WR_FLUSH_ERR;
2260         default:
2261                 return IB_WC_GENERAL_ERR;
2262         }
2264 }
2265
2266 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2267 {
2268         switch (qstatus) {
2269         case CQ_RES_RAWETH_QP1_STATUS_OK:
2270                 return IB_WC_SUCCESS;
2271         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2272                 return IB_WC_LOC_ACCESS_ERR;
2273         case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2274                 return IB_WC_LOC_LEN_ERR;
2275         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2276                 return IB_WC_LOC_PROT_ERR;
2277         case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2278                 return IB_WC_LOC_QP_OP_ERR;
2279         case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2280                 return IB_WC_GENERAL_ERR;
2281         case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2282                 return IB_WC_WR_FLUSH_ERR;
2283         case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2284                 return IB_WC_WR_FLUSH_ERR;
2285         default:
2286                 return IB_WC_GENERAL_ERR;
2287         }
2288 }
2289
2290 static u8 __rc_to_ib_wc_status(u8 qstatus)
2291 {
2292         switch (qstatus) {
2293         case CQ_RES_RC_STATUS_OK:
2294                 return IB_WC_SUCCESS;
2295         case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2296                 return IB_WC_LOC_ACCESS_ERR;
2297         case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2298                 return IB_WC_LOC_LEN_ERR;
2299         case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2300                 return IB_WC_LOC_PROT_ERR;
2301         case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2302                 return IB_WC_LOC_QP_OP_ERR;
2303         case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2304                 return IB_WC_GENERAL_ERR;
2305         case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2306                 return IB_WC_REM_INV_REQ_ERR;
2307         case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2308                 return IB_WC_WR_FLUSH_ERR;
2309         case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2310                 return IB_WC_WR_FLUSH_ERR;
2311         default:
2312                 return IB_WC_GENERAL_ERR;
2313         }
2314 }
2315
2316 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2317 {
2318         switch (cqe->type) {
2319         case BNXT_QPLIB_SWQE_TYPE_SEND:
2320                 wc->opcode = IB_WC_SEND;
2321                 break;
2322         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2323                 wc->opcode = IB_WC_SEND;
2324                 wc->wc_flags |= IB_WC_WITH_IMM;
2325                 break;
2326         case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2327                 wc->opcode = IB_WC_SEND;
2328                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2329                 break;
2330         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2331                 wc->opcode = IB_WC_RDMA_WRITE;
2332                 break;
2333         case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2334                 wc->opcode = IB_WC_RDMA_WRITE;
2335                 wc->wc_flags |= IB_WC_WITH_IMM;
2336                 break;
2337         case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2338                 wc->opcode = IB_WC_RDMA_READ;
2339                 break;
2340         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2341                 wc->opcode = IB_WC_COMP_SWAP;
2342                 break;
2343         case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2344                 wc->opcode = IB_WC_FETCH_ADD;
2345                 break;
2346         case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2347                 wc->opcode = IB_WC_LOCAL_INV;
2348                 break;
2349         case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2350                 wc->opcode = IB_WC_REG_MR;
2351                 break;
2352         default:
2353                 wc->opcode = IB_WC_SEND;
2354                 break;
2355         }
2356
2357         wc->status = __req_to_ib_wc_status(cqe->status);
2358 }
2359
2360 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2361                                      u16 raweth_qp1_flags2)
2362 {
2363         bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
2364
2365         /* raweth_qp1_flags Bit 9-6 indicates itype */
2366         if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2367             != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2368                 return -1;
2369
2370         if (raweth_qp1_flags2 &
2371             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2372             raweth_qp1_flags2 &
2373             CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2374                 is_udp = true;
2375                 /* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
2376                 if (raweth_qp1_flags2 &
2377                     CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
2378                         is_ipv6 = true;
2379                 else
2380                         is_ipv4 = true;
2381                 return is_ipv6 ? BNXT_RE_ROCEV2_IPV6_PACKET :
2382                                  BNXT_RE_ROCEV2_IPV4_PACKET;
2382         } else {
2383                 return BNXT_RE_ROCE_V1_PACKET;
2384         }
2385 }
2386
2387 static int bnxt_re_to_ib_nw_type(int nw_type)
2388 {
2389         u8 nw_hdr_type = 0xFF;
2390
2391         switch (nw_type) {
2392         case BNXT_RE_ROCE_V1_PACKET:
2393                 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2394                 break;
2395         case BNXT_RE_ROCEV2_IPV4_PACKET:
2396                 nw_hdr_type = RDMA_NETWORK_IPV4;
2397                 break;
2398         case BNXT_RE_ROCEV2_IPV6_PACKET:
2399                 nw_hdr_type = RDMA_NETWORK_IPV6;
2400                 break;
2401         }
2402         return nw_hdr_type;
2403 }
2404
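/*
 * Note: a looped-back QP1 packet carries 4 bytes of internal header in
 * front of the Ethernet header (see bnxt_re_process_raw_qp_pkt_rx), so
 * when the leading bytes do not match the interface MAC the ethertype
 * is inspected 4 bytes further into the buffer.
 */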
2405 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2406                                        void *rq_hdr_buf)
2407 {
2408         u8 *tmp_buf = NULL;
2409         struct ethhdr *eth_hdr;
2410         u16 eth_type;
2411         bool rc = false;
2412
2413         tmp_buf = (u8 *)rq_hdr_buf;
2414         /*
2415          * If the destination MAC does not match the I/F MAC, this could
2416          * be a loopback or a multicast address; check whether it is a
2417          * loopback packet.
2418          */
2419         if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
2420                 tmp_buf += 4;
2421                 /* Check the ether type */
2422                 eth_hdr = (struct ethhdr *)tmp_buf;
2423                 eth_type = ntohs(eth_hdr->h_proto);
2424                 switch (eth_type) {
2425                 case ETH_P_IBOE:
2426                         rc = true;
2427                         break;
2428                 case ETH_P_IP:
2429                 case ETH_P_IPV6: {
2430                         u32 len;
2431                         struct udphdr *udp_hdr;
2432
2433                         len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2434                                                       sizeof(struct ipv6hdr));
2435                         tmp_buf += sizeof(struct ethhdr) + len;
2436                         udp_hdr = (struct udphdr *)tmp_buf;
2437                         if (ntohs(udp_hdr->dest) ==
2438                                     ROCE_V2_UDP_DPORT)
2439                                 rc = true;
2440                         break;
2441                         }
2442                 default:
2443                         break;
2444                 }
2445         }
2446
2447         return rc;
2448 }
2449
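/*
 * Relay a packet received on the real QP1 to the shadow GSI QP: stash
 * the original CQE in sqp_tbl[wr_id], post a fresh receive buffer on
 * the shadow QP, then send the packet (minus its Ethernet header) to
 * the shadow QP so the MAD layer sees it as an ordinary UD completion.
 */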
2450 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2451                                          struct bnxt_qplib_cqe *cqe)
2452 {
2453         struct bnxt_re_dev *rdev = qp1_qp->rdev;
2454         struct bnxt_re_sqp_entries *sqp_entry = NULL;
2455         struct bnxt_re_qp *qp = rdev->qp1_sqp;
2456         struct ib_send_wr *swr;
2457         struct ib_ud_wr udwr;
2458         struct ib_recv_wr rwr;
2459         int pkt_type = 0;
2460         u32 tbl_idx;
2461         void *rq_hdr_buf;
2462         dma_addr_t rq_hdr_buf_map;
2463         dma_addr_t shrq_hdr_buf_map;
2464         u32 offset = 0;
2465         u32 skip_bytes = 0;
2466         struct ib_sge s_sge[2];
2467         struct ib_sge r_sge[2];
2468         int rc;
2469
2470         memset(&udwr, 0, sizeof(udwr));
2471         memset(&rwr, 0, sizeof(rwr));
2472         memset(&s_sge, 0, sizeof(s_sge));
2473         memset(&r_sge, 0, sizeof(r_sge));
2474
2475         swr = &udwr.wr;
2476         tbl_idx = cqe->wr_id;
2477
2478         rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2479                         (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2480         rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2481                                                           tbl_idx);
2482
2483         /* Shadow QP header buffer */
2484         shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2485                                                             tbl_idx);
2486         sqp_entry = &rdev->sqp_tbl[tbl_idx];
2487
2488         /* Store this cqe */
2489         memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2490         sqp_entry->qp1_qp = qp1_qp;
2491
2492         /* Find packet type from the cqe */
2493
2494         pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2495                                              cqe->raweth_qp1_flags2);
2496         if (pkt_type < 0) {
2497                 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2498                 return -EINVAL;
2499         }
2500
2501         /* Adjust the offset for the user buffer and post in the rq */
2502
2503         if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2504                 offset = 20;
2505
2506         /*
2507          * QP1 loopback packet has 4 bytes of internal header before
2508          * ether header. Skip these four bytes.
2509          */
2510         if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2511                 skip_bytes = 4;
2512
2513         /* First send SGE. Skip the ether header */
2514         s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2515                         + skip_bytes;
2516         s_sge[0].lkey = 0xFFFFFFFF;
2517         s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2518                                 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2519
2520         /* Second Send SGE */
2521         s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2522                         BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2523         if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2524                 s_sge[1].addr += 8;
2525         s_sge[1].lkey = 0xFFFFFFFF;
2526         s_sge[1].length = 256;
2527
2528         /* First recv SGE */
2529
2530         r_sge[0].addr = shrq_hdr_buf_map;
2531         r_sge[0].lkey = 0xFFFFFFFF;
2532         r_sge[0].length = 40;
2533
2534         r_sge[1].addr = sqp_entry->sge.addr + offset;
2535         r_sge[1].lkey = sqp_entry->sge.lkey;
2536         r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2537
2538         /* Create receive work request */
2539         rwr.num_sge = 2;
2540         rwr.sg_list = r_sge;
2541         rwr.wr_id = tbl_idx;
2542         rwr.next = NULL;
2543
2544         rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2545         if (rc) {
2546                 dev_err(rdev_to_dev(rdev),
2547                         "Failed to post Rx buffers to shadow QP");
2548                 return -ENOMEM;
2549         }
2550
2551         swr->num_sge = 2;
2552         swr->sg_list = s_sge;
2553         swr->wr_id = tbl_idx;
2554         swr->opcode = IB_WR_SEND;
2555         swr->next = NULL;
2556
2557         udwr.ah = &rdev->sqp_ah->ib_ah;
2558         udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2559         udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2560
2561         /* Post the received data to the shadow QP's send queue */
2562         rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
2563
2564         return rc;
2565 }
2566
2567 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2568                                           struct bnxt_qplib_cqe *cqe)
2569 {
2570         wc->opcode = IB_WC_RECV;
2571         wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2572         wc->wc_flags |= IB_WC_GRH;
2573 }
2574
2575 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
2576                                       struct bnxt_qplib_cqe *cqe)
2577 {
2578         wc->opcode = IB_WC_RECV;
2579         wc->status = __rc_to_ib_wc_status(cqe->status);
2580
2581         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2582                 wc->wc_flags |= IB_WC_WITH_IMM;
2583         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2584                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2585         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2586             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2587                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2588 }
2589
2590 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
2591                                              struct ib_wc *wc,
2592                                              struct bnxt_qplib_cqe *cqe)
2593 {
2594         u32 tbl_idx;
2595         struct bnxt_re_dev *rdev = qp->rdev;
2596         struct bnxt_re_qp *qp1_qp = NULL;
2597         struct bnxt_qplib_cqe *orig_cqe = NULL;
2598         struct bnxt_re_sqp_entries *sqp_entry = NULL;
2599         int nw_type;
2600
2601         tbl_idx = cqe->wr_id;
2602
2603         sqp_entry = &rdev->sqp_tbl[tbl_idx];
2604         qp1_qp = sqp_entry->qp1_qp;
2605         orig_cqe = &sqp_entry->cqe;
2606
2607         wc->wr_id = sqp_entry->wrid;
2608         wc->byte_len = orig_cqe->length;
2609         wc->qp = &qp1_qp->ib_qp;
2610
2611         wc->ex.imm_data = orig_cqe->immdata;
2612         wc->src_qp = orig_cqe->src_qp;
2613         memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
2614         wc->port_num = 1;
2615         wc->vendor_err = orig_cqe->status;
2616
2617         wc->opcode = IB_WC_RECV;
2618         wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
2619         wc->wc_flags |= IB_WC_GRH;
2620
2621         nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
2622                                             orig_cqe->raweth_qp1_flags2);
2623         if (nw_type >= 0) {
2624                 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
2625                 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2626         }
2627 }
2628
2629 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
2630                                       struct bnxt_qplib_cqe *cqe)
2631 {
2632         wc->opcode = IB_WC_RECV;
2633         wc->status = __rc_to_ib_wc_status(cqe->status);
2634
2635         if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
2636                 wc->wc_flags |= IB_WC_WITH_IMM;
2637         if (cqe->flags & CQ_RES_RC_FLAGS_INV)
2638                 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2639         if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
2640             (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
2641                 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2642 }
2643
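/*
 * Poll up to num_entries completions, bounded per pass by the CQE
 * staging array (cq->cql). Completions belonging to the shadow GSI QP
 * are patched up with the state saved in sqp_tbl[] before being
 * reported.
 */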
2644 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
2645 {
2646         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2647         struct bnxt_re_qp *qp;
2648         struct bnxt_qplib_cqe *cqe;
2649         int i, ncqe, budget;
2650         u32 tbl_idx;
2651         struct bnxt_re_sqp_entries *sqp_entry = NULL;
2652         unsigned long flags;
2653
2654         spin_lock_irqsave(&cq->cq_lock, flags);
2655         budget = min_t(u32, num_entries, cq->max_cql);
2656         if (!cq->cql) {
2657                 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
2658                 goto exit;
2659         }
2660         cqe = &cq->cql[0];
2661         while (budget) {
2662                 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget);
2663                 if (!ncqe)
2664                         break;
2665
2666                 for (i = 0; i < ncqe; i++, cqe++) {
2667                         /* Transcribe each qplib_wqe back to ib_wc */
2668                         memset(wc, 0, sizeof(*wc));
2669
2670                         wc->wr_id = cqe->wr_id;
2671                         wc->byte_len = cqe->length;
2672                         qp = container_of
2673                                 ((struct bnxt_qplib_qp *)
2674                                  (unsigned long)(cqe->qp_handle),
2675                                  struct bnxt_re_qp, qplib_qp);
2676                         if (!qp) {
2677                                 dev_err(rdev_to_dev(cq->rdev),
2678                                         "POLL CQ : bad QP handle");
2679                                 continue;
2680                         }
2681                         wc->qp = &qp->ib_qp;
2682                         wc->ex.imm_data = cqe->immdata;
2683                         wc->src_qp = cqe->src_qp;
2684                         memcpy(wc->smac, cqe->smac, ETH_ALEN);
2685                         wc->port_num = 1;
2686                         wc->vendor_err = cqe->status;
2687
2688                         switch (cqe->opcode) {
2689                         case CQ_BASE_CQE_TYPE_REQ:
2690                                 if (qp->qplib_qp.id ==
2691                                     qp->rdev->qp1_sqp->qplib_qp.id) {
2692                                         /* Internal send completion on
2693                                          * the shadow QP1; do not report
2694                                          */
2695                                         memset(wc, 0, sizeof(*wc));
2696                                         continue;
2697                                 }
2698                                 bnxt_re_process_req_wc(wc, cqe);
2699                                 break;
2700                         case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2701                                 if (!cqe->status) {
2702                                         int rc = 0;
2703
2704                                         rc = bnxt_re_process_raw_qp_pkt_rx
2705                                                                 (qp, cqe);
2706                                         if (!rc) {
2707                                                 memset(wc, 0, sizeof(*wc));
2708                                                 continue;
2709                                         }
2710                                         cqe->status = -1;
2711                                 }
2712                                 /* Errors need not be looped back.
2713                                  * But change the wr_id to the one
2714                                  * stored in the table
2715                                  */
2716                                 tbl_idx = cqe->wr_id;
2717                                 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
2718                                 wc->wr_id = sqp_entry->wrid;
2719                                 bnxt_re_process_res_rawqp1_wc(wc, cqe);
2720                                 break;
2721                         case CQ_BASE_CQE_TYPE_RES_RC:
2722                                 bnxt_re_process_res_rc_wc(wc, cqe);
2723                                 break;
2724                         case CQ_BASE_CQE_TYPE_RES_UD:
2725                                 if (qp->qplib_qp.id ==
2726                                     qp->rdev->qp1_sqp->qplib_qp.id) {
2727                                         /* Handle this completion with
2728                                          * the stored completion
2729                                          */
2730                                         if (cqe->status) {
2731                                                 continue;
2732                                         } else {
2733                                                 bnxt_re_process_res_shadow_qp_wc
2734                                                                 (qp, wc, cqe);
2735                                                 break;
2736                                         }
2737                                 }
2738                                 bnxt_re_process_res_ud_wc(wc, cqe);
2739                                 break;
2740                         default:
2741                                 dev_err(rdev_to_dev(cq->rdev),
2742                                         "POLL CQ : type 0x%x not handled",
2743                                         cqe->opcode);
2744                                 continue;
2745                         }
2746                         wc++;
2747                         budget--;
2748                 }
2749         }
2750 exit:
2751         spin_unlock_irqrestore(&cq->cq_lock, flags);
2752         return num_entries - budget;
2753 }
2754
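/* Re-arm CQ notifications: IB_CQ_NEXT_COMP arms for any completion
 * (ARMALL), IB_CQ_SOLICITED for solicited events only (ARMSE).
 */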
2755 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
2756                           enum ib_cq_notify_flags ib_cqn_flags)
2757 {
2758         struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2759         int type = 0;
2760
2761         /* Trigger on the very next completion */
2762         if (ib_cqn_flags & IB_CQ_NEXT_COMP)
2763                 type = DBR_DBR_TYPE_CQ_ARMALL;
2764         /* Trigger on the next solicited completion */
2765         else if (ib_cqn_flags & IB_CQ_SOLICITED)
2766                 type = DBR_DBR_TYPE_CQ_ARMSE;
2767
2768         bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
2769
2770         return 0;
2771 }
2772
2773 /* Memory Regions */
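/* Allocate a DMA MR spanning the whole local address space: no PBL is
 * built (a single zero page-list entry with total_size of -1), and the
 * rkey is exposed only when remote access flags were requested.
 */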
2774 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
2775 {
2776         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2777         struct bnxt_re_dev *rdev = pd->rdev;
2778         struct bnxt_re_mr *mr;
2779         u64 pbl = 0;
2780         int rc;
2781
2782         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2783         if (!mr)
2784                 return ERR_PTR(-ENOMEM);
2785
2786         mr->rdev = rdev;
2787         mr->qplib_mr.pd = &pd->qplib_pd;
2788         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
2789         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2790
2791         /* Allocate and register 0 as the address */
2792         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
2793         if (rc)
2794                 goto fail;
2795
2796         mr->qplib_mr.hwq.level = PBL_LVL_MAX;
2797         mr->qplib_mr.total_size = -1; /* Infinite length */
2798         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
2799         if (rc)
2800                 goto fail_mr;
2801
2802         mr->ib_mr.lkey = mr->qplib_mr.lkey;
2803         if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
2804                                IB_ACCESS_REMOTE_ATOMIC))
2805                 mr->ib_mr.rkey = mr->ib_mr.lkey;
2806         atomic_inc(&rdev->mr_count);
2807
2808         return &mr->ib_mr;
2809
2810 fail_mr:
2811         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2812 fail:
2813         kfree(mr);
2814         return ERR_PTR(rc);
2815 }
2816
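/* Destroy an MR: free the fast-register page list if one was allocated,
 * free the HW MRW, and release the umem pin for user-registered MRs.
 */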
2817 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
2818 {
2819         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
2820         struct bnxt_re_dev *rdev = mr->rdev;
2821         int rc;
2822
2823         if (mr->npages && mr->pages) {
2824                 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
2825                                                         &mr->qplib_frpl);
2826                 kfree(mr->pages);
2827                 mr->npages = 0;
2828                 mr->pages = NULL;
2829         }
2830         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2831
2832         if (!IS_ERR_OR_NULL(mr->ib_umem))
2833                 ib_umem_release(mr->ib_umem);
2834
2835         kfree(mr);
2836         atomic_dec(&rdev->mr_count);
2837         return rc;
2838 }
2839
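/* ib_sg_to_pages() callback: append one page address to mr->pages,
 * bounded by the HW fast-register page-list capacity.
 */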
2840 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
2841 {
2842         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
2843
2844         if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
2845                 return -ENOMEM;
2846
2847         mr->pages[mr->npages++] = addr;
2848         return 0;
2849 }
2850
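/* Populate the MR's page array from a scatterlist for fast registration.
 * Illustrative caller-side sketch (not driver code):
 *
 *	n = ib_map_mr_sg(ib_mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		...the MR could not cover the whole scatterlist...
 */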
2851 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
2852                       unsigned int *sg_offset)
2853 {
2854         struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
2855
2856         mr->npages = 0;
2857         return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
2858 }
2859
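/* Allocate a fast-registration MR (only IB_MR_TYPE_MEM_REG is supported),
 * backed by a kernel page array and a HW fast-register page list of up to
 * max_num_sg entries.
 */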
2860 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
2861                                u32 max_num_sg)
2862 {
2863         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2864         struct bnxt_re_dev *rdev = pd->rdev;
2865         struct bnxt_re_mr *mr = NULL;
2866         int rc;
2867
2868         if (type != IB_MR_TYPE_MEM_REG) {
2869                 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
2870                 return ERR_PTR(-EINVAL);
2871         }
2872         if (max_num_sg > MAX_PBL_LVL_1_PGS)
2873                 return ERR_PTR(-EINVAL);
2874
2875         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2876         if (!mr)
2877                 return ERR_PTR(-ENOMEM);
2878
2879         mr->rdev = rdev;
2880         mr->qplib_mr.pd = &pd->qplib_pd;
2881         mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
2882         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2883
2884         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
2885         if (rc)
2886                 goto fail;
2887
2888         mr->ib_mr.lkey = mr->qplib_mr.lkey;
2889         mr->ib_mr.rkey = mr->ib_mr.lkey;
2890
2891         mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
2892         if (!mr->pages) {
2893                 rc = -ENOMEM;
2894                 goto fail;
2895         }
2896         rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
2897                                                  &mr->qplib_frpl, max_num_sg);
2898         if (rc) {
2899                 dev_err(rdev_to_dev(rdev),
2900                         "Failed to allocate HW FR page list");
2901                 goto fail_mr;
2902         }
2903
2904         atomic_inc(&rdev->mr_count);
2905         return &mr->ib_mr;
2906
2907 fail_mr:
2908         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
2909 fail:
2910         kfree(mr->pages);
2911         kfree(mr);
2912         return ERR_PTR(rc);
2913 }
2914
2915 /* Fast Memory Regions */
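/* Allocate an FMR after validating the requested page and map limits
 * against the device attributes; the lkey doubles as the rkey.
 */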
2916 struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags,
2917                                  struct ib_fmr_attr *fmr_attr)
2918 {
2919         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
2920         struct bnxt_re_dev *rdev = pd->rdev;
2921         struct bnxt_re_fmr *fmr;
2922         int rc;
2923
2924         if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS ||
2925             fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) {
2926                 dev_err(rdev_to_dev(rdev), "FMR allocation request exceeds max limits");
2927                 return ERR_PTR(-ENOMEM);
2928         }
2929         fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
2930         if (!fmr)
2931                 return ERR_PTR(-ENOMEM);
2932
2933         fmr->rdev = rdev;
2934         fmr->qplib_fmr.pd = &pd->qplib_pd;
2935         fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
2936
2937         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
2938         if (rc)
2939                 goto fail;
2940
2941         fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags);
2942         fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey;
2943         fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
2944
2945         atomic_inc(&rdev->mr_count);
2946         return &fmr->ib_fmr;
2947 fail:
2948         kfree(fmr);
2949         return ERR_PTR(rc);
2950 }
2951
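/* (Re)map an FMR onto the caller's physical page list at iova; the
 * region size is list_len * PAGE_SIZE.
 */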
2952 int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len,
2953                          u64 iova)
2954 {
2955         struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
2956                                              ib_fmr);
2957         struct bnxt_re_dev *rdev = fmr->rdev;
2958         int rc;
2959
2960         fmr->qplib_fmr.va = iova;
2961         fmr->qplib_fmr.total_size = list_len * PAGE_SIZE;
2962
2963         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list,
2964                                list_len, true);
2965         if (rc)
2966                 dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
2967                         fmr->ib_fmr.lkey);
2968         return rc;
2969 }
2970
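/* Unmap every FMR on fmr_list, stopping at the first failure. */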
2971 int bnxt_re_unmap_fmr(struct list_head *fmr_list)
2972 {
2973         struct bnxt_re_dev *rdev;
2974         struct bnxt_re_fmr *fmr;
2975         struct ib_fmr *ib_fmr;
2976         int rc = 0;
2977
2978         /* Unmap each FMR in the fmr_list */
2979         list_for_each_entry(ib_fmr, fmr_list, list) {
2980                 fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
2981                 rdev = fmr->rdev;
2982
2983                 if (rdev) {
2984                         rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
2985                                                   &fmr->qplib_fmr, true);
2986                         if (rc)
2987                                 break;
2988                 }
2989         }
2990         return rc;
2991 }
2992
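/* Free an FMR's underlying MRW and its host-side state. */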
2993 int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr)
2994 {
2995         struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
2996                                                ib_fmr);
2997         struct bnxt_re_dev *rdev = fmr->rdev;
2998         int rc;
2999
3000         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
3001         if (rc)
3002                 dev_err(rdev_to_dev(rdev), "Failed to free FMR");
3003
3004         kfree(fmr);
3005         atomic_dec(&rdev->mr_count);
3006         return rc;
3007 }
3008
3009 /* uverbs */
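/* Register a user MR: pin the pages with ib_umem_get(), flatten the umem
 * scatterlist into a PBL of DMA page addresses, and register the PBL with
 * the HW.  Only PAGE_SIZE pages are accepted; hugetlb umems are rejected.
 */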
3010 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3011                                   u64 virt_addr, int mr_access_flags,
3012                                   struct ib_udata *udata)
3013 {
3014         struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3015         struct bnxt_re_dev *rdev = pd->rdev;
3016         struct bnxt_re_mr *mr;
3017         struct ib_umem *umem;
3018         u64 *pbl_tbl, *pbl_tbl_orig;
3019         int i, umem_pgs, pages, page_shift, rc;
3020         struct scatterlist *sg;
3021         int entry;
3022
3023         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3024         if (!mr)
3025                 return ERR_PTR(-ENOMEM);
3026
3027         mr->rdev = rdev;
3028         mr->qplib_mr.pd = &pd->qplib_pd;
3029         mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3030         mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3031
3032         umem = ib_umem_get(ib_pd->uobject->context, start, length,
3033                            mr_access_flags, 0);
3034         if (IS_ERR(umem)) {
3035                 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3036                 rc = -EFAULT;
3037                 goto free_mr;
3038         }
3039         mr->ib_umem = umem;
3040
3041         rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3042         if (rc) {
3043                 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3044                 goto release_umem;
3045         }
3046         /* The fixed portion of the rkey is the same as the lkey */
3047         mr->ib_mr.rkey = mr->qplib_mr.rkey;
3048
3049         mr->qplib_mr.va = virt_addr;
3050         umem_pgs = ib_umem_page_count(umem);
3051         if (!umem_pgs) {
3052                 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3053                 rc = -EINVAL;
3054                 goto free_mrw;
3055         }
3056         mr->qplib_mr.total_size = length;
3057
3058         pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3059         if (!pbl_tbl) {
3060                 rc = -ENOMEM;
3061                 goto free_mrw;
3062         }
3063         pbl_tbl_orig = pbl_tbl;
3064
3065         page_shift = ilog2(umem->page_size);
3066         if (umem->hugetlb) {
3067                 dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
3068                 rc = -EFAULT;
3069                 goto fail;
3070         }
3071         if (umem->page_size != PAGE_SIZE) {
3072                 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3073                 rc = -EFAULT;
3074                 goto fail;
3075         }
3076         /* Map umem buf ptrs to the PBL */
3077         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3078                 pages = sg_dma_len(sg) >> page_shift;
3079                 for (i = 0; i < pages; i++, pbl_tbl++)
3080                         *pbl_tbl = sg_dma_address(sg) + ((u64)i << page_shift);
3081         }
3082         rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
3083                                umem_pgs, false);
3084         if (rc) {
3085                 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3086                 goto fail;
3087         }
3088
3089         kfree(pbl_tbl_orig);
3090
3091         mr->ib_mr.lkey = mr->qplib_mr.lkey;
3092         mr->ib_mr.rkey = mr->qplib_mr.lkey;
3093         atomic_inc(&rdev->mr_count);
3094
3095         return &mr->ib_mr;
3096 fail:
3097         kfree(pbl_tbl_orig);
3098 free_mrw:
3099         bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3100 release_umem:
3101         ib_umem_release(umem);
3102 free_mr:
3103         kfree(mr);
3104         return ERR_PTR(rc);
3105 }
3106
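/* Create a per-process ucontext: verify the uverbs ABI version, allocate
 * the shared page that bnxt_re_mmap() exposes at pgoff 0, and return the
 * device limits to user space in a bnxt_re_uctx_resp.
 */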
3107 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3108                                            struct ib_udata *udata)
3109 {
3110         struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3111         struct bnxt_re_uctx_resp resp;
3112         struct bnxt_re_ucontext *uctx;
3113         struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3114         int rc;
3115
3116         dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3117                 ibdev->uverbs_abi_ver);
3118
3119         if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3120                 dev_dbg(rdev_to_dev(rdev), "ABI version mismatch: device supports %d",
3121                         BNXT_RE_ABI_VERSION);
3122                 return ERR_PTR(-EPERM);
3123         }
3124
3125         uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3126         if (!uctx)
3127                 return ERR_PTR(-ENOMEM);
3128
3129         uctx->rdev = rdev;
3130
3131         uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3132         if (!uctx->shpg) {
3133                 rc = -ENOMEM;
3134                 goto fail;
3135         }
3136         spin_lock_init(&uctx->sh_lock);
3137
3138         resp.dev_id = rdev->en_dev->pdev->devfn; /* Temp: use idr_alloc instead */
3139         resp.max_qp = rdev->qplib_ctx.qpc_count;
3140         resp.pg_size = PAGE_SIZE;
3141         resp.cqe_sz = sizeof(struct cq_base);
3142         resp.max_cqd = dev_attr->max_cq_wqes;
3143         resp.rsvd    = 0;
3144
3145         rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3146         if (rc) {
3147                 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3148                 rc = -EFAULT;
3149                 goto cfail;
3150         }
3151
3152         return &uctx->ib_uctx;
3153 cfail:
3154         free_page((unsigned long)uctx->shpg);
3155         uctx->shpg = NULL;
3156 fail:
3157         kfree(uctx);
3158         return ERR_PTR(rc);
3159 }
3160
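/* Release a ucontext and its shared page. */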
3161 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3162 {
3163         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3164                                                    struct bnxt_re_ucontext,
3165                                                    ib_uctx);
3166         if (uctx->shpg)
3167                 free_page((unsigned long)uctx->shpg);
3168         kfree(uctx);
3169         return 0;
3170 }
3171
3172 /* Map the DPI doorbell page (pgoff != 0) or the shared page (pgoff 0) to user space */
3173 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3174 {
3175         struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3176                                                    struct bnxt_re_ucontext,
3177                                                    ib_uctx);
3178         struct bnxt_re_dev *rdev = uctx->rdev;
3179         u64 pfn;
3180
3181         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3182                 return -EINVAL;
3183
3184         if (vma->vm_pgoff) {
3185                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3186                 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3187                                        PAGE_SIZE, vma->vm_page_prot)) {
3188                         dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3189                         return -EAGAIN;
3190                 }
3191         } else {
3192                 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3193                 if (remap_pfn_range(vma, vma->vm_start,
3194                                     pfn, PAGE_SIZE, vma->vm_page_prot)) {
3195                         dev_err(rdev_to_dev(rdev),
3196                                 "Failed to map shared page");
3197                         return -EAGAIN;
3198                 }
3199         }
3200
3201         return 0;
3202 }