/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <linux/iommu.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "qedr_hsi.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"

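/* Scale a PWM doorbell record offset into a byte offset within the
 * doorbell BAR; DB_PWM_ADDR_OFFSET_SHIFT is the per-record scaling
 * defined by the HW interface.
 */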
#define DB_ADDR_SHIFT(addr)		((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

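/* RoCE exposes a single P_Key table entry, so every valid index simply
 * returns the default P_Key.
 */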
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
		return -EINVAL;

	*pkey = QEDR_ROCE_PKEY_DEFAULT;
	return 0;
}

int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
		   union ib_gid *sgid)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	int rc = 0;

	if (!rdma_cap_roce_gid_table(ibdev, port))
		return -ENODEV;

	rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
	if (rc == -EAGAIN) {
		memcpy(sgid, &zgid, sizeof(*sgid));
		return 0;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
		 sgid->global.interface_id, sgid->global.subnet_prefix);

	return rc;
}

int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context)
{
	if (!rdma_cap_roce_gid_table(device, port_num))
		return -EINVAL;

	if (port_num > QEDR_MAX_PORT)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	return 0;
}

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_device_attr *qattr = &dev->attr;

	if (!dev->rdma_ctx) {
		DP_ERR(dev,
		       "qedr_query_device called with invalid params rdma_ctx=%p\n",
		       dev->rdma_ctx);
		return -EINVAL;
	}

	memset(attr, 0, sizeof(*attr));

	attr->fw_ver = qattr->fw_ver;
	attr->sys_image_guid = qattr->sys_image_guid;
	attr->max_mr_size = qattr->max_mr_size;
	attr->page_size_cap = qattr->page_size_caps;
	attr->vendor_id = qattr->vendor_id;
	attr->vendor_part_id = qattr->vendor_part_id;
	attr->hw_ver = qattr->hw_ver;
	attr->max_qp = qattr->max_qp;
	attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
	    IB_DEVICE_RC_RNR_NAK_GEN |
	    IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;

	attr->max_sge = qattr->max_sge;
	attr->max_sge_rd = qattr->max_sge;
	attr->max_cq = qattr->max_cq;
	attr->max_cqe = qattr->max_cqe;
	attr->max_mr = qattr->max_mr;
	attr->max_mw = qattr->max_mw;
	attr->max_pd = qattr->max_pd;
	attr->atomic_cap = dev->atomic_cap;
	attr->max_fmr = qattr->max_fmr;
	attr->max_map_per_fmr = 16;
	attr->max_qp_init_rd_atom =
	    1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
	attr->max_qp_rd_atom =
	    min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
		attr->max_qp_init_rd_atom);

	attr->max_srq = qattr->max_srq;
	attr->max_srq_sge = qattr->max_srq_sge;
	attr->max_srq_wr = qattr->max_srq_wr;

	attr->local_ca_ack_delay = qattr->dev_ack_delay;
	attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
	attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
	attr->max_ah = qattr->max_ah;

	return 0;
}

#define QEDR_SPEED_SDR		(1)
#define QEDR_SPEED_DDR		(2)
#define QEDR_SPEED_QDR		(4)
#define QEDR_SPEED_FDR10	(8)
#define QEDR_SPEED_FDR		(16)
#define QEDR_SPEED_EDR		(32)

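/* Map an Ethernet link speed in Mbps to the nearest IB speed/width
 * pair; e.g. 100G Ethernet is reported as EDR (~25 Gbps per lane)
 * over four lanes.
 */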
static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
					    u8 *ib_width)
{
	switch (speed) {
	case 1000:
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;
	case 10000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 20000:
		*ib_speed = QEDR_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 25000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case 40000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 50000:
		*ib_speed = QEDR_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case 100000:
		*ib_speed = QEDR_SPEED_EDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = QEDR_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
{
	struct qedr_dev *dev;
	struct qed_rdma_port *rdma_port;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "rdma_ctx is NULL\n");
		return -EINVAL;
	}

	rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
	memset(attr, 0, sizeof(*attr));

	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = 5;	/* IB physical state: LinkUp */
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = 3;	/* IB physical state: Disabled */
	}
	attr->max_mtu = IB_MTU_4096;
	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
	attr->lid = 0;
	attr->lmc = 0;
	attr->sm_lid = 0;
	attr->sm_sl = 0;
	attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
	attr->gid_tbl_len = QEDR_MAX_SGID;
	attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
	attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
	attr->qkey_viol_cntr = 0;
	get_link_speed_and_width(rdma_port->link_speed,
				 &attr->active_speed, &attr->active_width);
	attr->max_msg_sz = rdma_port->max_msg_size;
	attr->max_vl_num = 4;

	return 0;
}

int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
		     struct ib_port_modify *props)
{
	struct qedr_dev *dev;

	dev = get_qedr_dev(ibdev);
	if (port > 1) {
		DP_ERR(dev, "invalid_port=0x%x\n", port);
		return -EINVAL;
	}

	return 0;
}

static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			 unsigned long len)
{
	struct qedr_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return -ENOMEM;

	mm->key.phy_addr = phy_addr;
	/* This function might be called with a length which is not a multiple
	 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
	 * forces this granularity by increasing the requested size if needed.
	 * When qedr_mmap is called, it will search the list with the updated
	 * length as a key. To prevent search failures, the length is rounded up
	 * in advance to PAGE_SIZE.
	 */
	mm->key.len = roundup(len, PAGE_SIZE);
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);

	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
		 (unsigned long long)mm->key.phy_addr,
		 (unsigned long)mm->key.len, uctx);

	return 0;
}

static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
			     unsigned long len)
{
	bool found = false;
	struct qedr_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	/* Don't dereference the list cursor here: if nothing matched, 'mm'
	 * no longer points at a valid entry. Log the search key instead.
	 */
	DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
		 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
		 phy_addr, len, uctx, found);

	return found;
}

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	int rc;
	struct qedr_ucontext *ctx;
	struct qedr_alloc_ucontext_resp uresp;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_add_user_out_params oparams;

	if (!udata)
		return ERR_PTR(-EFAULT);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
	if (rc) {
		DP_ERR(dev,
		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size, or closing unneeded RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
		       rc);
		goto err;
	}

	ctx->dpi = oparams.dpi;
	ctx->dpi_addr = oparams.dpi_addr;
	ctx->dpi_phys_addr = oparams.dpi_phys_addr;
	ctx->dpi_size = oparams.dpi_size;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_pa = ctx->dpi_phys_addr;
	uresp.db_size = ctx->dpi_size;
	uresp.max_send_wr = dev->attr.max_sqe;
	uresp.max_recv_wr = dev->attr.max_rqe;
	uresp.max_srq_wr = dev->attr.max_srq_wr;
	uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
	uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
	uresp.max_cqes = QEDR_MAX_CQES;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		goto err;

	ctx->dev = dev;

	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
		 &ctx->ibucontext);
	return &ctx->ibucontext;

err:
	kfree(ctx);
	return ERR_PTR(rc);
}

int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
	struct qedr_mm *mm, *tmp;
	int status = 0;

	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
		 uctx);
	uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
			 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
			 mm->key.phy_addr, mm->key.len, uctx);
		list_del(&mm->entry);
		kfree(mm);
	}

	kfree(uctx);
	return status;
}

int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
	struct qedr_dev *dev = get_qedr_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = dev->db_phys_addr;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int rc = 0;
	bool found;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
		 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		DP_ERR(dev, "Vma_start not page aligned = %lu\n",
		       vma->vm_start);
		return -EINVAL;
	}

	found = qedr_search_mmap(ucontext, vm_page, len);
	if (!found) {
		DP_ERR(dev, "Vma_pgoff not found in mapped array = %lu\n",
		       vma->vm_pgoff);
		return -EINVAL;
	}

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
						     dev->db_size))) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
		if (vma->vm_flags & VM_READ) {
			DP_ERR(dev, "Trying to map doorbell bar for read\n");
			return -EPERM;
		}

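		/* User space only ever writes doorbells, so a
		 * write-combined mapping of the doorbell page is
		 * sufficient here.
		 */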
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot);
	} else {
		DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
		rc = remap_pfn_range(vma, vma->vm_start,
				     vma->vm_pgoff, len, vma->vm_page_prot);
	}
	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
	return rc;
}

struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qedr_ucontext *uctx = NULL;
	struct qedr_alloc_pd_uresp uresp;
	struct qedr_pd *pd;
	u16 pd_id;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
		 (udata && context) ? "User Lib" : "Kernel");

	if (!dev->rdma_ctx) {
		DP_ERR(dev, "invalid RDMA context\n");
		return ERR_PTR(-EINVAL);
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);

	uresp.pd_id = pd_id;
	pd->pd_id = pd_id;

	if (udata && context) {
		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rc)
			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
		uctx = get_qedr_ucontext(context);
		uctx->pd = pd;
		pd->uctx = uctx;
	}

	return &pd->ibpd;
}

int qedr_dealloc_pd(struct ib_pd *ibpd)
{
	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
	struct qedr_pd *pd = get_qedr_pd(ibpd);

	if (!pd) {
		pr_err("Invalid PD received in dealloc_pd\n");
		return -EINVAL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);

	kfree(pd);

	return 0;
}

static void qedr_free_pbl(struct qedr_dev *dev,
			  struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
{
	struct pci_dev *pdev = dev->pdev;
	int i;

	for (i = 0; i < pbl_info->num_pbls; i++) {
		if (!pbl[i].va)
			continue;
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl[i].va, pbl[i].pa);
	}

	kfree(pbl);
}

#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)

#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
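/* A 4KB PBL page holds 512 64-bit PBEs, so a single-layer PBL covers at
 * most 512 buffer pages per PBL page; with the maximum 64KB page a
 * two-layer PBL can describe up to 8192 * 8192 pages.
 */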

static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
					   struct qedr_pbl_info *pbl_info,
					   gfp_t flags)
{
	struct pci_dev *pdev = dev->pdev;
	struct qedr_pbl *pbl_table;
	dma_addr_t *pbl_main_tbl;
	dma_addr_t pa;
	void *va;
	int i;

	pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
	if (!pbl_table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < pbl_info->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
					&pa, flags);
		if (!va)
			goto err;

		memset(va, 0, pbl_info->pbl_size);
		pbl_table[i].va = va;
		pbl_table[i].pa = pa;
	}

	/* Two-layer PBLs: if there is more than one PBL, initialize the
	 * first one with physical pointers to all of the rest.
	 */
	pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
	for (i = 0; i < pbl_info->num_pbls - 1; i++)
		pbl_main_tbl[i] = pbl_table[i + 1].pa;

	return pbl_table;

err:
	/* Unwind only the pages allocated so far and then free the table
	 * itself; calling qedr_free_pbl() here would free those pages a
	 * second time, since their va pointers are still set.
	 */
	for (i--; i >= 0; i--)
		dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
				  pbl_table[i].va, pbl_table[i].pa);

	kfree(pbl_table);

	return ERR_PTR(-ENOMEM);
}

static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
				struct qedr_pbl_info *pbl_info,
				u32 num_pbes, int two_layer_capable)
{
	u32 pbl_capacity;
	u32 pbl_size;
	u32 num_pbls;

	if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
		if (num_pbes > MAX_PBES_TWO_LAYER) {
			DP_ERR(dev, "prepare pbl table: too many pages %d\n",
			       num_pbes);
			return -EINVAL;
		}

		/* calculate required pbl page size */
		pbl_size = MIN_FW_PBL_PAGE_SIZE;
		pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
			       NUM_PBES_ON_PAGE(pbl_size);

		while (pbl_capacity < num_pbes) {
			pbl_size *= 2;
			pbl_capacity = pbl_size / sizeof(u64);
			pbl_capacity = pbl_capacity * pbl_capacity;
		}

		num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
		num_pbls++;	/* One for layer 0 (points to the PBLs) */
		pbl_info->two_layered = true;
	} else {
		/* One layered PBL */
		num_pbls = 1;
		pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
				 roundup_pow_of_two((num_pbes * sizeof(u64))));
		pbl_info->two_layered = false;
	}

	pbl_info->num_pbls = num_pbls;
	pbl_info->pbl_size = pbl_size;
	pbl_info->num_pbes = num_pbes;

	DP_DEBUG(dev, QEDR_MSG_MR,
		 "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
		 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);

	return 0;
}

static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
			       struct qedr_pbl *pbl,
			       struct qedr_pbl_info *pbl_info)
{
	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
	struct qedr_pbl *pbl_tbl;
	struct scatterlist *sg;
	struct regpair *pbe;
	int entry;
	u32 addr;

	if (!pbl_info->num_pbes)
		return;

	/* If we have a two-layered pbl, the first pbl points to the rest
	 * of the pbls, and the first entry lies in the second pbl of the
	 * table.
	 */
	if (pbl_info->two_layered)
		pbl_tbl = &pbl[1];
	else
		pbl_tbl = pbl;

	pbe = (struct regpair *)pbl_tbl->va;
	if (!pbe) {
		DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
		return;
	}

	pbe_cnt = 0;

	shift = ilog2(umem->page_size);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> shift;
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* store the page address in pbe */
			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
					      umem->page_size * pg_cnt);
			addr = upper_32_bits(sg_dma_address(sg) +
					     umem->page_size * pg_cnt);
			pbe->hi = cpu_to_le32(addr);
			pbe_cnt++;
			total_num_pbes++;
			pbe++;

			if (total_num_pbes == pbl_info->num_pbes)
				return;

			/* If the current pbl is full of pbes, move on to
			 * the next pbl.
			 */
			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
				pbl_tbl++;
				pbe = (struct regpair *)pbl_tbl->va;
				pbe_cnt = 0;
			}
		}
	}
}

static int qedr_copy_cq_uresp(struct qedr_dev *dev,
			      struct qedr_cq *cq, struct ib_udata *udata)
{
	struct qedr_create_cq_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));

	uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
	uresp.icid = cq->icid;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);

	return rc;
}

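/* CQE validity is tracked with a toggle bit that changes phase on each
 * wrap of the ring: whenever the consumer passes the last element, the
 * expected toggle mask is flipped so fresh CQEs can be told apart from
 * stale ones.
 */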
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

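/* Worked example (assuming 32-byte CQEs): a request for 100 entries
 * needs (100 + 1) * 32 = 3232 bytes, which rounds up to one 4KB page
 * and yields 128 usable entries.
 */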
static inline int qedr_align_cq_entries(int entries)
{
	u64 size, aligned_size;

	/* We allocate an extra entry that we don't report to the FW. */
	size = (entries + 1) * QEDR_CQE_SIZE;
	aligned_size = ALIGN(size, PAGE_SIZE);

	return aligned_size / QEDR_CQE_SIZE;
}

static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
				       struct qedr_dev *dev,
				       struct qedr_userq *q,
				       u64 buf_addr, size_t buf_len,
				       int access, int dmasync)
{
	int page_cnt;
	int rc;

	q->buf_addr = buf_addr;
	q->buf_len = buf_len;
	q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
	if (IS_ERR(q->umem)) {
		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
		       PTR_ERR(q->umem));
		return PTR_ERR(q->umem);
	}

	page_cnt = ib_umem_page_count(q->umem);
	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
	if (rc)
		goto err0;

	q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
	if (IS_ERR(q->pbl_tbl)) {
		/* Propagate the error; returning 0 here would report
		 * success with an unusable queue.
		 */
		rc = PTR_ERR(q->pbl_tbl);
		goto err0;
	}

	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);

	return 0;

err0:
	ib_umem_release(q->umem);

	return rc;
}

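/* The FW is told one entry less than was allocated: this is the extra
 * entry added in qedr_align_cq_entries() that is not reported to the FW.
 */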
static inline void qedr_init_cq_params(struct qedr_cq *cq,
				       struct qedr_ucontext *ctx,
				       struct qedr_dev *dev, int vector,
				       int chain_entries, int page_cnt,
				       u64 pbl_ptr,
				       struct qed_rdma_create_cq_in_params
				       *params)
{
	memset(params, 0, sizeof(*params));
	params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
	params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
	params->cnq_id = vector;
	params->cq_size = chain_entries - 1;
	params->dpi = (ctx) ? ctx->dpi : dev->dpi;
	params->pbl_num_pages = page_cnt;
	params->pbl_ptr = pbl_ptr;
	params->pbl_two_level = 0;
}

static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	/* Flush data before signalling doorbell */
	wmb();
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);

	/* Make sure write would stick */
	mmiowb();
}

int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	unsigned long sflags;

	if (cq->cq_type == QEDR_CQ_TYPE_GSI)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, sflags);

	cq->arm_flags = 0;

	if (flags & IB_CQ_SOLICITED)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;

	if (flags & IB_CQ_NEXT_COMP)
		cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;

	doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

	spin_unlock_irqrestore(&cq->cq_lock, sflags);

	return 0;
}

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx, struct ib_udata *udata)
{
	struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
	struct qed_rdma_destroy_cq_out_params destroy_oparams;
	struct qed_rdma_destroy_cq_in_params destroy_iparams;
	struct qedr_dev *dev = get_qedr_dev(ibdev);
	struct qed_rdma_create_cq_in_params params;
	struct qedr_create_cq_ureq ureq;
	int vector = attr->comp_vector;
	int entries = attr->cqe;
	struct qedr_cq *cq;
	int chain_entries;
	int page_cnt;
	u64 pbl_ptr;
	u16 icid;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT,
		 "create_cq: called from %s. entries=%d, vector=%d\n",
		 udata ? "User Lib" : "Kernel", entries, vector);

	if (entries > QEDR_MAX_CQES) {
		DP_ERR(dev,
		       "create cq: the number of entries %d is too high. Must be equal to or below %d.\n",
		       entries, QEDR_MAX_CQES);
		return ERR_PTR(-EINVAL);
	}

	chain_entries = qedr_align_cq_entries(entries);
	chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	if (udata) {
		memset(&ureq, 0, sizeof(ureq));
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
			DP_ERR(dev,
			       "create cq: problem copying data from user space\n");
			goto err0;
		}

		if (!ureq.len) {
			DP_ERR(dev,
			       "create cq: cannot create a cq with 0 entries\n");
			goto err0;
		}

		cq->cq_type = QEDR_CQ_TYPE_USER;

		rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
					  ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
		if (rc)
			goto err0;

		pbl_ptr = cq->q.pbl_tbl->pa;
		page_cnt = cq->q.pbl_info.num_pbes;

		cq->ibcq.cqe = chain_entries;
	} else {
		cq->cq_type = QEDR_CQ_TYPE_KERNEL;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U32,
						   chain_entries,
						   sizeof(union rdma_cqe),
						   &cq->pbl);
		if (rc)
			goto err1;

		page_cnt = qed_chain_get_page_cnt(&cq->pbl);
		pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
		cq->ibcq.cqe = cq->pbl.capacity;
	}

	qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
			    pbl_ptr, &params);

	rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
	if (rc)
		goto err2;

	cq->icid = icid;
	cq->sig = QEDR_CQ_MAGIC_NUMBER;
	spin_lock_init(&cq->cq_lock);

	if (ib_ctx) {
		rc = qedr_copy_cq_uresp(dev, cq, udata);
		if (rc)
			goto err3;
	} else {
		/* Generate doorbell address. */
		cq->db_addr = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
		cq->db.data.icid = cq->icid;
		cq->db.data.params = DB_AGG_CMD_SET <<
		    RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;

		/* point to the very last element, passing it we will toggle */
		cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
		cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
		cq->latest_cqe = NULL;
		consume_cqe(cq);
		cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
	}

	DP_DEBUG(dev, QEDR_MSG_CQ,
		 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
		 cq->icid, cq, params.cq_size);

	return &cq->ibcq;

err3:
	destroy_iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
				  &destroy_oparams);
err2:
	if (udata)
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
	else
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
	if (udata)
		ib_umem_release(cq->q.umem);
err0:
	kfree(cq);
	return ERR_PTR(-EINVAL);
}

int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);

	return 0;
}

int qedr_destroy_cq(struct ib_cq *ibcq)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qedr_cq *cq = get_qedr_cq(ibcq);

	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d\n", cq->icid);

	/* GSI CQs are handled by the driver, so they don't exist in the FW */
	if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
		int rc;

		iparams.icid = cq->icid;
		rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
					       &oparams);
		if (rc)
			return rc;
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}

	if (ibcq->uobject && ibcq->uobject->context) {
		qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
		ib_umem_release(cq->q.umem);
	}

	kfree(cq);

	return 0;
}

static inline int get_gid_info_from_table(struct ib_qp *ibqp,
					  struct ib_qp_attr *attr,
					  int attr_mask,
					  struct qed_rdma_modify_qp_in_params
					  *qp_params)
{
	enum rdma_network_type nw_type;
	struct ib_gid_attr gid_attr;
	union ib_gid gid;
	u32 ipv4_addr;
	int rc = 0;
	int i;

	rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
			       attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
	if (rc)
		return rc;

	if (!memcmp(&gid, &zgid, sizeof(gid)))
		return -ENOENT;

	if (gid_attr.ndev) {
		qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);

		dev_put(gid_attr.ndev);
		nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
		switch (nw_type) {
		case RDMA_NETWORK_IPV6:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &attr->ah_attr.grh.dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V2_IPV6;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			break;
		case RDMA_NETWORK_IB:
			memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
			       sizeof(qp_params->sgid));
			memcpy(&qp_params->dgid.bytes[0],
			       &attr->ah_attr.grh.dgid,
			       sizeof(qp_params->dgid));
			qp_params->roce_mode = ROCE_V1;
			break;
		case RDMA_NETWORK_IPV4:
			memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
			memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
			ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
			qp_params->sgid.ipv4_addr = ipv4_addr;
			ipv4_addr =
			    qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
			qp_params->dgid.ipv4_addr = ipv4_addr;
			SET_FIELD(qp_params->modify_flags,
				  QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
			qp_params->roce_mode = ROCE_V2_IPV4;
			break;
		}
	}

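	/* GIDs are carried in network byte order; convert each dword to
	 * the host order expected by the qed layer below.
	 */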
	for (i = 0; i < 4; i++) {
		qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
		qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
	}

	if (qp_params->vlan_id >= VLAN_CFI_MASK)
		qp_params->vlan_id = 0;

	return 0;
}

static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
	ib_umem_release(qp->usq.umem);
}

static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
	ib_umem_release(qp->urq.umem);
}

static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
	kfree(qp->wqe_wr_id);
}

static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
	kfree(qp->rqe_wr_id);
}

static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
			       struct ib_qp_init_attr *attrs)
{
	struct qedr_device_attr *qattr = &dev->attr;

	/* QP0... attrs->qp_type == IB_QPT_GSI */
	if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
		DP_DEBUG(dev, QEDR_MSG_QP,
			 "create qp: unsupported qp type=0x%x requested\n",
			 attrs->qp_type);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > qattr->max_sqe) {
		DP_ERR(dev,
		       "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
		       attrs->cap.max_send_wr, qattr->max_sqe);
		return -EINVAL;
	}

	if (attrs->cap.max_inline_data > qattr->max_inline) {
		DP_ERR(dev,
		       "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
		       attrs->cap.max_inline_data, qattr->max_inline);
		return -EINVAL;
	}

	if (attrs->cap.max_send_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
		       attrs->cap.max_send_sge, qattr->max_sge);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_sge > qattr->max_sge) {
		DP_ERR(dev,
		       "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
		       attrs->cap.max_recv_sge, qattr->max_sge);
		return -EINVAL;
	}

	/* Unprivileged user space cannot create special QP */
	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
		DP_ERR(dev,
		       "create qp: userspace can't create special QPs of type=0x%x\n",
		       attrs->qp_type);
		return -EINVAL;
	}

	return 0;
}

static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	uresp->rq_icid = qp->icid;
}

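/* By convention the RQ doorbell uses the QP's icid while the SQ
 * doorbell uses icid + 1 (see also the kernel doorbell init helpers
 * below).
 */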
static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
			       struct qedr_qp *qp)
{
	uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	uresp->sq_icid = qp->icid + 1;
}

static int qedr_copy_qp_uresp(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_udata *udata)
{
	struct qedr_create_qp_uresp uresp;
	int rc;

	memset(&uresp, 0, sizeof(uresp));
	qedr_copy_sq_uresp(&uresp, qp);
	qedr_copy_rq_uresp(&uresp, qp);

	uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
	uresp.qp_id = qp->qp_id;

	rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rc)
		DP_ERR(dev,
		       "create qp: failed a copy to user space with qp icid=0x%x.\n",
		       qp->icid);

	return rc;
}

static void qedr_set_qp_init_params(struct qedr_dev *dev,
				    struct qedr_qp *qp,
				    struct qedr_pd *pd,
				    struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;

	spin_lock_init(&qp->q_lock);

	qp->qp_type = attrs->qp_type;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->state = QED_ROCE_QP_STATE_RESET;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
	qp->sq_cq = get_qedr_cq(attrs->send_cq);
	qp->rq_cq = get_qedr_cq(attrs->recv_cq);
	qp->dev = dev;

	DP_DEBUG(dev, QEDR_MSG_QP,
		 "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
		 pd->pd_id, qp->qp_type, qp->max_inline_data,
		 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
		 qp->sq.max_sges, qp->sq_cq->icid);
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	DP_DEBUG(dev, QEDR_MSG_QP,
		 "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
		 qp->rq.max_sges, qp->rq_cq->icid);
}

static inline void
qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
			 struct qedr_create_qp_ureq *ureq)
{
	/* QP handle to be written in CQE */
	params->qp_handle_lo = ureq->qp_handle_lo;
	params->qp_handle_hi = ureq->qp_handle_hi;
}

static inline void
qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->sq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
	qp->sq.db_data.data.icid = qp->icid + 1;
}

static inline void
qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
{
	qp->rq.db = dev->db_addr +
		    DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
	qp->rq.db_data.data.icid = qp->icid;
}

static inline int
qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
			      struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
{
	/* Allocate driver internal RQ array */
	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		return -ENOMEM;

	DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);

	return 0;
}

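/* The SQ shadow array below is sized with dev->wq_multiplier headroom
 * over the requested max_send_wr (capped by the device's max_sqe),
 * since a single WR may occupy several ring elements.
 */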
static inline int
qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
			      struct qedr_qp *qp,
			      struct ib_qp_init_attr *attrs,
			      struct qed_rdma_create_qp_in_params *params)
{
	u32 temp_max_wr;

	/* Allocate driver internal SQ array */
	temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
	temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);

	/* temp_max_wr < attr->max_sqe < u16 so the casting is safe */
	qp->sq.max_wr = (u16)temp_max_wr;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		return -ENOMEM;

	DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);

	/* QP handle to be written in CQE */
	params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
	params->qp_handle_hi = upper_32_bits((uintptr_t)qp);

	return 0;
}

static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
					 struct qedr_qp *qp,
					 struct ib_qp_init_attr *attrs)
{
	u32 n_sq_elems, n_sq_entries;
	int rc;

	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements
	 * in the ring. The ring should allow at least a single WR, even if
	 * the user requested none, due to allocation issues.
	 */
	n_sq_entries = attrs->cap.max_send_wr;
	n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
	n_sq_entries = max_t(u32, n_sq_entries, 1);
	n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
	rc = dev->ops->common->chain_alloc(dev->cdev,
					   QED_CHAIN_USE_TO_PRODUCE,
					   QED_CHAIN_MODE_PBL,
					   QED_CHAIN_CNT_TYPE_U32,
					   n_sq_elems,
					   QEDR_SQE_ELEMENT_SIZE,
					   &qp->sq.pbl);
	if (rc) {
		DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
		return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_SQ,
		 "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
		 qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
		 n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
	return 0;
}

static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
					 struct qedr_qp *qp,
					 struct ib_qp_init_attr *attrs)
{
	u32 n_rq_elems, n_rq_entries;
	int rc;

	/* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements
	 * in the ring. The ring should allow at least a single WR, even if
	 * the user requested none, due to allocation issues.
	 */
1335         n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1336         n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1337         rc = dev->ops->common->chain_alloc(dev->cdev,
1338                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1339                                            QED_CHAIN_MODE_PBL,
1340                                            QED_CHAIN_CNT_TYPE_U32,
1341                                            n_rq_elems,
1342                                            QEDR_RQE_ELEMENT_SIZE,
1343                                            &qp->rq.pbl);
1344
1345         if (rc) {
1346                 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1347                 return -ENOMEM;
1348         }
1349
1350         DP_DEBUG(dev, QEDR_MSG_RQ,
1351                  "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1352                  qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1353                  n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1354
1355         /* n_rq_entries < u16 so the casting is safe */
1356         qp->rq.max_wr = (u16)n_rq_entries;
1357
1358         return 0;
1359 }
1360
1361 static inline void
1362 qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1363                           struct qedr_pd *pd,
1364                           struct qedr_qp *qp,
1365                           struct ib_qp_init_attr *attrs,
1366                           struct ib_udata *udata,
1367                           struct qed_rdma_create_qp_in_params *params)
1368 {
1369         /* QP handle to be written in an async event */
1370         params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1371         params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1372
1373         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1374         params->fmr_and_reserved_lkey = !udata;
1375         params->pd = pd->pd_id;
1376         params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1377         params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1378         params->max_sq_sges = 0;
1379         params->stats_queue = 0;
1380
1381         if (udata) {
1382                 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1383                 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1384         } else {
1385                 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1386                 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1387         }
1388 }
1389
1390 static inline void
1391 qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1392                           struct ib_qp_init_attr *attrs,
1393                           struct ib_udata *udata,
1394                           struct qed_rdma_create_qp_in_params *params)
1395 {
1396         params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1397         params->srq_id = 0;
1398         params->use_srq = false;
1399
1400         if (udata) {
1401                 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1402                 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1403         } else {
1404                 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1405                 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1406         }
1407 }
1408
1409 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1410 {
1411         DP_DEBUG(dev, QEDR_MSG_QP,
1412                  "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1413                  qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1414                  qp->urq.buf_len);
1415 }
1416
1417 static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1418                                     struct qedr_dev *dev,
1419                                     struct qedr_qp *qp,
1420                                     struct qedr_create_qp_ureq *ureq)
1421 {
1422         int rc;
1423
1424         /* SQ - read access only (0), dma sync not required (0) */
1425         rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1426                                   ureq->sq_len, 0, 0);
1427         if (rc)
1428                 return rc;
1429
1430         /* RQ - read access only (0), dma sync not required (0) */
1431         rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1432                                   ureq->rq_len, 0, 0);
1433
1434         if (rc)
1435                 qedr_cleanup_user_sq(dev, qp);
1436         return rc;
1437 }
1438
1439 static inline int
1440 qedr_init_kernel_qp(struct qedr_dev *dev,
1441                     struct qedr_qp *qp,
1442                     struct ib_qp_init_attr *attrs,
1443                     struct qed_rdma_create_qp_in_params *params)
1444 {
1445         int rc;
1446
1447         rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1448         if (rc) {
1449                 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1450                 return rc;
1451         }
1452
1453         rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1454         if (rc) {
1455                 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1456                 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1457                 return rc;
1458         }
1459
1460         rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1461         if (rc) {
1462                 qedr_cleanup_kernel_sq(dev, qp);
1463                 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1464                 return rc;
1465         }
1466
1467         rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1468         if (rc) {
1469                 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1470                 qedr_cleanup_kernel_sq(dev, qp);
1471                 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1472                 return rc;
1473         }
1474
1475         return rc;
1476 }
1477
1478 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1479                              struct ib_qp_init_attr *attrs,
1480                              struct ib_udata *udata)
1481 {
1482         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1483         struct qed_rdma_create_qp_out_params out_params;
1484         struct qed_rdma_create_qp_in_params in_params;
1485         struct qedr_pd *pd = get_qedr_pd(ibpd);
1486         struct ib_ucontext *ib_ctx = NULL;
1487         struct qedr_ucontext *ctx = NULL;
1488         struct qedr_create_qp_ureq ureq;
1489         struct qedr_qp *qp;
1490         struct ib_qp *ibqp;
1491         int rc = 0;
1492
1493         DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1494                  udata ? "user library" : "kernel", pd);
1495
1496         rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1497         if (rc)
1498                 return ERR_PTR(rc);
1499
1500         if (attrs->srq)
1501                 return ERR_PTR(-EINVAL);
1502
1503         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1504         if (!qp)
1505                 return ERR_PTR(-ENOMEM);
1506
1507         DP_DEBUG(dev, QEDR_MSG_QP,
1508                  "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1509                  get_qedr_cq(attrs->send_cq),
1510                  get_qedr_cq(attrs->send_cq)->icid,
1511                  get_qedr_cq(attrs->recv_cq),
1512                  get_qedr_cq(attrs->recv_cq)->icid);
1513
1514         qedr_set_qp_init_params(dev, qp, pd, attrs);
1515
1516         if (attrs->qp_type == IB_QPT_GSI) {
1517                 if (udata) {
1518                         DP_ERR(dev,
1519                                "create qp: unexpected udata when creating GSI QP\n");
1520                         goto err0;
1521                 }
1522                 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
1523                 if (IS_ERR(ibqp))
1524                         kfree(qp);
1525                 return ibqp;
1526         }
1527
1528         memset(&in_params, 0, sizeof(in_params));
1529
1530         if (udata) {
1531                 if (!(ibpd->uobject && ibpd->uobject->context))
1532                         goto err0;
1533
1534                 ib_ctx = ibpd->uobject->context;
1535                 ctx = get_qedr_ucontext(ib_ctx);
1536
1537                 memset(&ureq, 0, sizeof(ureq));
1538                 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1539                         DP_ERR(dev,
1540                                "create qp: problem copying data from user space\n");
1541                         goto err0;
1542                 }
1543
1544                 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1545                 if (rc)
1546                         goto err0;
1547
1548                 qedr_init_qp_user_params(&in_params, &ureq);
1549         } else {
1550                 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1551                 if (rc)
1552                         goto err0;
1553         }
1554
1555         qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1556         qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1557
1558         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1559                                               &in_params, &out_params);
1560
1561         if (!qp->qed_qp)
1562                 goto err1;
1563
1564         qp->qp_id = out_params.qp_id;
1565         qp->icid = out_params.icid;
1566         qp->ibqp.qp_num = qp->qp_id;
1567
1568         if (udata) {
1569                 rc = qedr_copy_qp_uresp(dev, qp, udata);
1570                 if (rc)
1571                         goto err2;
1572
1573                 qedr_qp_user_print(dev, qp);
1574         } else {
1575                 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1576                 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1577         }
1578
1579         DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1580                  udata ? "user" : "kernel", qp);
1581
1582         return &qp->ibqp;
1583
1584 err2:
1585         rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1586         if (rc)
1587                 DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
1588 err1:
1589         if (udata) {
1590                 qedr_cleanup_user_sq(dev, qp);
1591                 qedr_cleanup_user_rq(dev, qp);
1592         } else {
1593                 qedr_cleanup_kernel_sq(dev, qp);
1594                 qedr_cleanup_kernel_rq(dev, qp);
1595         }
1596
1597 err0:
1598         kfree(qp);
1599
1600         return ERR_PTR(-EFAULT);
1601 }
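
/*
 * Usage sketch (illustrative only, not part of the driver): a kernel ULP
 * reaches qedr_create_qp() through the core verbs layer. Assuming valid
 * pd, send_cq and recv_cq objects, a minimal RC QP request looks like:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type     = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 16,
 *			.max_recv_wr  = 16,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *
 * On this path udata is NULL, so the kernel-QP branch above allocates the
 * SQ/RQ chains in the driver instead of mapping user buffers.
 */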
1602
1603 enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1604 {
1605         switch (qp_state) {
1606         case QED_ROCE_QP_STATE_RESET:
1607                 return IB_QPS_RESET;
1608         case QED_ROCE_QP_STATE_INIT:
1609                 return IB_QPS_INIT;
1610         case QED_ROCE_QP_STATE_RTR:
1611                 return IB_QPS_RTR;
1612         case QED_ROCE_QP_STATE_RTS:
1613                 return IB_QPS_RTS;
1614         case QED_ROCE_QP_STATE_SQD:
1615                 return IB_QPS_SQD;
1616         case QED_ROCE_QP_STATE_ERR:
1617                 return IB_QPS_ERR;
1618         case QED_ROCE_QP_STATE_SQE:
1619                 return IB_QPS_SQE;
1620         }
1621         return IB_QPS_ERR;
1622 }
1623
1624 enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
1625 {
1626         switch (qp_state) {
1627         case IB_QPS_RESET:
1628                 return QED_ROCE_QP_STATE_RESET;
1629         case IB_QPS_INIT:
1630                 return QED_ROCE_QP_STATE_INIT;
1631         case IB_QPS_RTR:
1632                 return QED_ROCE_QP_STATE_RTR;
1633         case IB_QPS_RTS:
1634                 return QED_ROCE_QP_STATE_RTS;
1635         case IB_QPS_SQD:
1636                 return QED_ROCE_QP_STATE_SQD;
1637         case IB_QPS_ERR:
1638                 return QED_ROCE_QP_STATE_ERR;
1639         default:
1640                 return QED_ROCE_QP_STATE_ERR;
1641         }
1642 }
1643
1644 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1645 {
1646         qed_chain_reset(&qph->pbl);
1647         qph->prod = 0;
1648         qph->cons = 0;
1649         qph->wqe_cons = 0;
1650         qph->db_data.data.value = cpu_to_le16(0);
1651 }
1652
1653 static int qedr_update_qp_state(struct qedr_dev *dev,
1654                                 struct qedr_qp *qp,
1655                                 enum qed_roce_qp_state new_state)
1656 {
1657         int status = 0;
1658
1659         if (new_state == qp->state)
1660                 return 0;
1661
1662         switch (qp->state) {
1663         case QED_ROCE_QP_STATE_RESET:
1664                 switch (new_state) {
1665                 case QED_ROCE_QP_STATE_INIT:
1666                         qp->prev_wqe_size = 0;
1667                         qedr_reset_qp_hwq_info(&qp->sq);
1668                         qedr_reset_qp_hwq_info(&qp->rq);
1669                         break;
1670                 default:
1671                         status = -EINVAL;
1672                         break;
1673                 }
1674                 break;
1675         case QED_ROCE_QP_STATE_INIT:
1676                 switch (new_state) {
1677                 case QED_ROCE_QP_STATE_RTR:
1678                         /* Update doorbell (in case post_recv was
1679                          * done before move to RTR)
1680                          */
1681                         wmb();
1682                         writel(qp->rq.db_data.raw, qp->rq.db);
1683                         /* Make sure write takes effect */
1684                         mmiowb();
1685                         break;
1686                 case QED_ROCE_QP_STATE_ERR:
1687                         break;
1688                 default:
1689                         /* Invalid state change. */
1690                         status = -EINVAL;
1691                         break;
1692                 }
1693                 break;
1694         case QED_ROCE_QP_STATE_RTR:
1695                 /* RTR->XXX */
1696                 switch (new_state) {
1697                 case QED_ROCE_QP_STATE_RTS:
1698                         break;
1699                 case QED_ROCE_QP_STATE_ERR:
1700                         break;
1701                 default:
1702                         /* Invalid state change. */
1703                         status = -EINVAL;
1704                         break;
1705                 }
1706                 break;
1707         case QED_ROCE_QP_STATE_RTS:
1708                 /* RTS->XXX */
1709                 switch (new_state) {
1710                 case QED_ROCE_QP_STATE_SQD:
1711                         break;
1712                 case QED_ROCE_QP_STATE_ERR:
1713                         break;
1714                 default:
1715                         /* Invalid state change. */
1716                         status = -EINVAL;
1717                         break;
1718                 }
1719                 break;
1720         case QED_ROCE_QP_STATE_SQD:
1721                 /* SQD->XXX */
1722                 switch (new_state) {
1723                 case QED_ROCE_QP_STATE_RTS:
1724                 case QED_ROCE_QP_STATE_ERR:
1725                         break;
1726                 default:
1727                         /* Invalid state change. */
1728                         status = -EINVAL;
1729                         break;
1730                 }
1731                 break;
1732         case QED_ROCE_QP_STATE_ERR:
1733                 /* ERR->XXX */
1734                 switch (new_state) {
1735                 case QED_ROCE_QP_STATE_RESET:
1736                         break;
1737                 default:
1738                         status = -EINVAL;
1739                         break;
1740                 }
1741                 break;
1742         default:
1743                 status = -EINVAL;
1744                 break;
1745         }
1746
1747         return status;
1748 }
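
/*
 * Summary of the transitions accepted above (anything else returns
 * -EINVAL); this mirrors the switch statement and the IB QP state machine:
 *
 *	RESET -> INIT
 *	INIT  -> RTR | ERR
 *	RTR   -> RTS | ERR
 *	RTS   -> SQD | ERR
 *	SQD   -> RTS | ERR
 *	ERR   -> RESET
 *
 * A transition to the current state is accepted as a no-op. The
 * INIT -> RTR transition also rings the RQ doorbell so that receive WQEs
 * posted while the QP was still in INIT become visible to the hardware.
 */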
1749
1750 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1751                    int attr_mask, struct ib_udata *udata)
1752 {
1753         struct qedr_qp *qp = get_qedr_qp(ibqp);
1754         struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1755         struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1756         enum ib_qp_state old_qp_state, new_qp_state;
1757         int rc = 0;
1758
1759         DP_DEBUG(dev, QEDR_MSG_QP,
1760                  "modify qp: qp %p attr_mask=0x%x, state=%d\n", qp, attr_mask,
1761                  attr->qp_state);
1762
1763         old_qp_state = qedr_get_ibqp_state(qp->state);
1764         if (attr_mask & IB_QP_STATE)
1765                 new_qp_state = attr->qp_state;
1766         else
1767                 new_qp_state = old_qp_state;
1768
1769         if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
1770                                 ibqp->qp_type, attr_mask,
1771                                 IB_LINK_LAYER_ETHERNET)) {
1772                 DP_ERR(dev,
1773                        "modify qp: invalid attribute mask=0x%x specified for\n"
1774                        "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
1775                        attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
1776                        new_qp_state);
1777                 rc = -EINVAL;
1778                 goto err;
1779         }
1780
1781         /* Translate the masks... */
1782         if (attr_mask & IB_QP_STATE) {
1783                 SET_FIELD(qp_params.modify_flags,
1784                           QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1785                 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1786         }
1787
1788         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1789                 qp_params.sqd_async = true;
1790
1791         if (attr_mask & IB_QP_PKEY_INDEX) {
1792                 SET_FIELD(qp_params.modify_flags,
1793                           QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1794                 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1795                         rc = -EINVAL;
1796                         goto err;
1797                 }
1798
1799                 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1800         }
1801
1802         if (attr_mask & IB_QP_QKEY)
1803                 qp->qkey = attr->qkey;
1804
1805         if (attr_mask & IB_QP_ACCESS_FLAGS) {
1806                 SET_FIELD(qp_params.modify_flags,
1807                           QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1808                 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1809                                                   IB_ACCESS_REMOTE_READ;
1810                 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1811                                                    IB_ACCESS_REMOTE_WRITE;
1812                 qp_params.incoming_atomic_en = attr->qp_access_flags &
1813                                                IB_ACCESS_REMOTE_ATOMIC;
1814         }
1815
1816         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1817                 if (attr_mask & IB_QP_PATH_MTU) {
1818                         if (attr->path_mtu < IB_MTU_256 ||
1819                             attr->path_mtu > IB_MTU_4096) {
1820                                 DP_ERR(dev, "Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1821                                 rc = -EINVAL;
1822                                 goto err;
1823                         }
1824                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1825                                       ib_mtu_enum_to_int(iboe_get_mtu
1826                                                          (dev->ndev->mtu)));
1827                 }
1828
1829                 if (!qp->mtu) {
1830                         qp->mtu = ib_mtu_enum_to_int(
1831                                         iboe_get_mtu(dev->ndev->mtu));
1832                         DP_ERR(dev, "Fixing zeroed MTU to %d\n", qp->mtu);
1833                 }
1834
1835                 SET_FIELD(qp_params.modify_flags,
1836                           QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1837
1838                 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1839                 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1840                 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1841
1842                 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1843
1844                 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1845                 if (rc) {
1846                         DP_ERR(dev,
1847                                "modify qp: problems with GID index %d (rc=%d)\n",
1848                                attr->ah_attr.grh.sgid_index, rc);
1849                         return rc;
1850                 }
1851
1852                 rc = qedr_get_dmac(dev, &attr->ah_attr,
1853                                    qp_params.remote_mac_addr);
1854                 if (rc)
1855                         return rc;
1856
1857                 qp_params.use_local_mac = true;
1858                 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1859
1860                 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1861                          qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1862                          qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1863                 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1864                          qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1865                          qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1866                 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1867                          qp_params.remote_mac_addr);
1868
1870                 qp_params.mtu = qp->mtu;
1871                 qp_params.lb_indication = false;
1872         }
1873
1874         if (!qp_params.mtu) {
1875                 /* Stay with current MTU */
1876                 if (qp->mtu)
1877                         qp_params.mtu = qp->mtu;
1878                 else
1879                         qp_params.mtu =
1880                             ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1881         }
1882
1883         if (attr_mask & IB_QP_TIMEOUT) {
1884                 SET_FIELD(qp_params.modify_flags,
1885                           QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1886
1887                 qp_params.ack_timeout = attr->timeout;
1888                 if (attr->timeout) {
1889                         u32 temp;
1890
1891                         /* 4.096 usec * 2^timeout, in msec (FW unit) */
1892                         temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1893                         qp_params.ack_timeout = temp;
1894                 } else {
1895                         /* Infinite */
1896                         qp_params.ack_timeout = 0;
1897                 }
1898         }
1899         if (attr_mask & IB_QP_RETRY_CNT) {
1900                 SET_FIELD(qp_params.modify_flags,
1901                           QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1902                 qp_params.retry_cnt = attr->retry_cnt;
1903         }
1904
1905         if (attr_mask & IB_QP_RNR_RETRY) {
1906                 SET_FIELD(qp_params.modify_flags,
1907                           QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1908                 qp_params.rnr_retry_cnt = attr->rnr_retry;
1909         }
1910
1911         if (attr_mask & IB_QP_RQ_PSN) {
1912                 SET_FIELD(qp_params.modify_flags,
1913                           QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1914                 qp_params.rq_psn = attr->rq_psn;
1915                 qp->rq_psn = attr->rq_psn;
1916         }
1917
1918         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1919                 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1920                         rc = -EINVAL;
1921                         DP_ERR(dev,
1922                                "unsupported max_rd_atomic=%d, supported=%d\n",
1923                                attr->max_rd_atomic,
1924                                dev->attr.max_qp_req_rd_atomic_resc);
1925                         goto err;
1926                 }
1927
1928                 SET_FIELD(qp_params.modify_flags,
1929                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1930                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1931         }
1932
1933         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1934                 SET_FIELD(qp_params.modify_flags,
1935                           QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1936                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1937         }
1938
1939         if (attr_mask & IB_QP_SQ_PSN) {
1940                 SET_FIELD(qp_params.modify_flags,
1941                           QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1942                 qp_params.sq_psn = attr->sq_psn;
1943                 qp->sq_psn = attr->sq_psn;
1944         }
1945
1946         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1947                 if (attr->max_dest_rd_atomic >
1948                     dev->attr.max_qp_resp_rd_atomic_resc) {
1949                         DP_ERR(dev,
1950                                "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1951                                attr->max_dest_rd_atomic,
1952                                dev->attr.max_qp_resp_rd_atomic_resc);
1953
1954                         rc = -EINVAL;
1955                         goto err;
1956                 }
1957
1958                 SET_FIELD(qp_params.modify_flags,
1959                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1960                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1961         }
1962
1963         if (attr_mask & IB_QP_DEST_QPN) {
1964                 SET_FIELD(qp_params.modify_flags,
1965                           QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1966
1967                 qp_params.dest_qp = attr->dest_qp_num;
1968                 qp->dest_qp_num = attr->dest_qp_num;
1969         }
1970
1971         if (qp->qp_type != IB_QPT_GSI)
1972                 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1973                                               qp->qed_qp, &qp_params);
1974
1975         if (attr_mask & IB_QP_STATE) {
1976                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1977                         rc = qedr_update_qp_state(dev, qp, qp_params.new_state);
1978                 qp->state = qp_params.new_state;
1979         }
1980
1981 err:
1982         return rc;
1983 }
1984
1985 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1986 {
1987         int ib_qp_acc_flags = 0;
1988
1989         if (params->incoming_rdma_write_en)
1990                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1991         if (params->incoming_rdma_read_en)
1992                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1993         if (params->incoming_atomic_en)
1994                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1995         ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1996         return ib_qp_acc_flags;
1997 }
1998
1999 int qedr_query_qp(struct ib_qp *ibqp,
2000                   struct ib_qp_attr *qp_attr,
2001                   int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2002 {
2003         struct qed_rdma_query_qp_out_params params;
2004         struct qedr_qp *qp = get_qedr_qp(ibqp);
2005         struct qedr_dev *dev = qp->dev;
2006         int rc = 0;
2007
2008         memset(&params, 0, sizeof(params));
2009
2010         rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2011         if (rc)
2012                 goto err;
2013
2014         memset(qp_attr, 0, sizeof(*qp_attr));
2015         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2016
2017         qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2018         qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2019         qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2020         qp_attr->path_mig_state = IB_MIG_MIGRATED;
2021         qp_attr->rq_psn = params.rq_psn;
2022         qp_attr->sq_psn = params.sq_psn;
2023         qp_attr->dest_qp_num = params.dest_qp;
2024
2025         qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2026
2027         qp_attr->cap.max_send_wr = qp->sq.max_wr;
2028         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2029         qp_attr->cap.max_send_sge = qp->sq.max_sges;
2030         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2031         qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2032         qp_init_attr->cap = qp_attr->cap;
2033
2034         memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2035                sizeof(qp_attr->ah_attr.grh.dgid.raw));
2036
2037         qp_attr->ah_attr.grh.flow_label = params.flow_label;
2038         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2039         qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2040         qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2041
2042         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2043         qp_attr->ah_attr.port_num = 1;
2044         qp_attr->ah_attr.sl = 0;
2045         qp_attr->timeout = params.timeout;
2046         qp_attr->rnr_retry = params.rnr_retry;
2047         qp_attr->retry_cnt = params.retry_cnt;
2048         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2049         qp_attr->pkey_index = params.pkey_index;
2050         qp_attr->port_num = 1;
2051         qp_attr->ah_attr.src_path_bits = 0;
2052         qp_attr->ah_attr.static_rate = 0;
2053         qp_attr->alt_pkey_index = 0;
2054         qp_attr->alt_port_num = 0;
2055         qp_attr->alt_timeout = 0;
2056         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2057
2058         qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2059         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2060         qp_attr->max_rd_atomic = params.max_rd_atomic;
2061         qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2062
2063         DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2064                  qp_attr->cap.max_inline_data);
2065
2066 err:
2067         return rc;
2068 }
2069
2070 int qedr_destroy_qp(struct ib_qp *ibqp)
2071 {
2072         struct qedr_qp *qp = get_qedr_qp(ibqp);
2073         struct qedr_dev *dev = qp->dev;
2074         struct ib_qp_attr attr;
2075         int attr_mask = 0;
2076         int rc = 0;
2077
2078         DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2079                  qp, qp->qp_type);
2080
2081         if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2082             (qp->state != QED_ROCE_QP_STATE_ERR) &&
2083             (qp->state != QED_ROCE_QP_STATE_INIT)) {
2084
2085                 attr.qp_state = IB_QPS_ERR;
2086                 attr_mask |= IB_QP_STATE;
2087
2088                 /* Change the QP state to ERROR */
2089                 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2090         }
2091
2092         if (qp->qp_type != IB_QPT_GSI) {
2093                 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2094                 if (rc)
2095                         return rc;
2096         } else {
2097                 qedr_destroy_gsi_qp(dev);
2098         }
2099
2100         if (ibqp->uobject && ibqp->uobject->context) {
2101                 qedr_cleanup_user_sq(dev, qp);
2102                 qedr_cleanup_user_rq(dev, qp);
2103         } else {
2104                 qedr_cleanup_kernel_sq(dev, qp);
2105                 qedr_cleanup_kernel_rq(dev, qp);
2106         }
2107
2108         kfree(qp);
2109
2110         return rc;
2111 }
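
/*
 * Note: moving a live QP to ERR before destroying it (above) follows the
 * usual IB verbs pattern; in the ERR state outstanding work requests are
 * flushed, so their completions can be reaped before the QP resources are
 * torn down.
 */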
2112
2113 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
2114                              struct ib_udata *udata)
2115 {
2116         struct qedr_ah *ah;
2117
2118         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2119         if (!ah)
2120                 return ERR_PTR(-ENOMEM);
2121
2122         ah->attr = *attr;
2123
2124         return &ah->ibah;
2125 }
2126
2127 int qedr_destroy_ah(struct ib_ah *ibah)
2128 {
2129         struct qedr_ah *ah = get_qedr_ah(ibah);
2130
2131         kfree(ah);
2132         return 0;
2133 }
2134
2135 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2136 {
2137         struct qedr_pbl *pbl, *tmp;
2138
2139         if (info->pbl_table)
2140                 list_add_tail(&info->pbl_table->list_entry,
2141                               &info->free_pbl_list);
2142
2143         if (!list_empty(&info->inuse_pbl_list))
2144                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2145
2146         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2147                 list_del(&pbl->list_entry);
2148                 qedr_free_pbl(dev, &info->pbl_info, pbl);
2149         }
2150 }
2151
2152 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2153                         size_t page_list_len, bool two_layered)
2154 {
2155         struct qedr_pbl *tmp;
2156         int rc;
2157
2158         INIT_LIST_HEAD(&info->free_pbl_list);
2159         INIT_LIST_HEAD(&info->inuse_pbl_list);
2160
2161         rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2162                                   page_list_len, two_layered);
2163         if (rc)
2164                 goto done;
2165
2166         info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2167         if (!info->pbl_table) {
2168                 rc = -ENOMEM;
2169                 goto done;
2170         }
2171
2172         DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2173                  &info->pbl_table->pa);
2174
2175         /* in the usual case we use 2 PBLs, so we add one to the free
2176          * list and allocate another one
2177          */
2178         tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2179         if (!tmp) {
2180                 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2181                 goto done;
2182         }
2183
2184         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2185
2186         DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2187
2188 done:
2189         if (rc)
2190                 free_mr_info(dev, info);
2191
2192         return rc;
2193 }
2194
2195 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2196                                u64 usr_addr, int acc, struct ib_udata *udata)
2197 {
2198         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2199         struct qedr_mr *mr;
2200         struct qedr_pd *pd;
2201         int rc = -ENOMEM;
2202
2203         pd = get_qedr_pd(ibpd);
2204         DP_DEBUG(dev, QEDR_MSG_MR,
2205                  "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2206                  pd->pd_id, start, len, usr_addr, acc);
2207
2208         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2209                 return ERR_PTR(-EINVAL);
2210
2211         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2212         if (!mr)
2213                 return ERR_PTR(rc);
2214
2215         mr->type = QEDR_MR_USER;
2216
2217         mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2218         if (IS_ERR(mr->umem)) {
2219                 rc = -EFAULT;
2220                 goto err0;
2221         }
2222
2223         rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2224         if (rc)
2225                 goto err1;
2226
2227         qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2228                            &mr->info.pbl_info);
2229
2230         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2231         if (rc) {
2232                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2233                 goto err1;
2234         }
2235
2236         /* Index only, 18 bit long, lkey = itid << 8 | key */
2237         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2238         mr->hw_mr.key = 0;
2239         mr->hw_mr.pd = pd->pd_id;
2240         mr->hw_mr.local_read = 1;
2241         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2242         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2243         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2244         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2245         mr->hw_mr.mw_bind = false;
2246         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2247         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2248         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2249         mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2250         mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2251         mr->hw_mr.length = len;
2252         mr->hw_mr.vaddr = usr_addr;
2253         mr->hw_mr.zbva = false;
2254         mr->hw_mr.phy_mr = false;
2255         mr->hw_mr.dma_mr = false;
2256
2257         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2258         if (rc) {
2259                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2260                 goto err2;
2261         }
2262
2263         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2264         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2265             mr->hw_mr.remote_atomic)
2266                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2267
2268         DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2269                  mr->ibmr.lkey);
2270         return &mr->ibmr;
2271
2272 err2:
2273         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2274 err1:
2275         qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2276 err0:
2277         kfree(mr);
2278         return ERR_PTR(rc);
2279 }
2280
2281 int qedr_dereg_mr(struct ib_mr *ib_mr)
2282 {
2283         struct qedr_mr *mr = get_qedr_mr(ib_mr);
2284         struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2285         int rc = 0;
2286
2287         rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2288         if (rc)
2289                 return rc;
2290
2291         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2292
2293         if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2294                 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2295
2296         /* it could be user registered memory. */
2297         if (mr->umem)
2298                 ib_umem_release(mr->umem);
2299
2300         kfree(mr);
2301
2302         return rc;
2303 }
2304
2305 struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2306 {
2307         struct qedr_pd *pd = get_qedr_pd(ibpd);
2308         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2309         struct qedr_mr *mr;
2310         int rc = -ENOMEM;
2311
2312         DP_DEBUG(dev, QEDR_MSG_MR,
2313                  "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2314                  max_page_list_len);
2315
2316         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2317         if (!mr)
2318                 return ERR_PTR(rc);
2319
2320         mr->dev = dev;
2321         mr->type = QEDR_MR_FRMR;
2322
2323         rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2324         if (rc)
2325                 goto err0;
2326
2327         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2328         if (rc) {
2329                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2330                 goto err0;
2331         }
2332
2333         /* Index only, 18 bit long, lkey = itid << 8 | key */
2334         mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2335         mr->hw_mr.key = 0;
2336         mr->hw_mr.pd = pd->pd_id;
2337         mr->hw_mr.local_read = 1;
2338         mr->hw_mr.local_write = 0;
2339         mr->hw_mr.remote_read = 0;
2340         mr->hw_mr.remote_write = 0;
2341         mr->hw_mr.remote_atomic = 0;
2342         mr->hw_mr.mw_bind = false;
2343         mr->hw_mr.pbl_ptr = 0;
2344         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2345         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2346         mr->hw_mr.fbo = 0;
2347         mr->hw_mr.length = 0;
2348         mr->hw_mr.vaddr = 0;
2349         mr->hw_mr.zbva = false;
2350         mr->hw_mr.phy_mr = true;
2351         mr->hw_mr.dma_mr = false;
2352
2353         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2354         if (rc) {
2355                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2356                 goto err1;
2357         }
2358
2359         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2360         mr->ibmr.rkey = mr->ibmr.lkey;
2361
2362         DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2363         return mr;
2364
2365 err1:
2366         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2367 err0:
2368         kfree(mr);
2369         return ERR_PTR(rc);
2370 }
2371
2372 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2373                             enum ib_mr_type mr_type, u32 max_num_sg)
2374 {
2376         struct qedr_mr *mr;
2377
2378         if (mr_type != IB_MR_TYPE_MEM_REG)
2379                 return ERR_PTR(-EINVAL);
2380
2381         mr = __qedr_alloc_mr(ibpd, max_num_sg);
2382
2383         if (IS_ERR(mr))
2384                 return ERR_CAST(mr);
2385
2388         return &mr->ibmr;
2389 }
2390
2391 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2392 {
2393         struct qedr_mr *mr = get_qedr_mr(ibmr);
2394         struct qedr_pbl *pbl_table;
2395         struct regpair *pbe;
2396         u32 pbes_in_page;
2397
2398         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
2399                 DP_ERR(mr->dev, "qedr_set_page: PBL is full (%d)\n", mr->npages);
2400                 return -ENOMEM;
2401         }
2402
2403         DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2404                  mr->npages, addr);
2405
2406         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2407         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2408         pbe = (struct regpair *)pbl_table->va;
2409         pbe +=  mr->npages % pbes_in_page;
2410         pbe->lo = cpu_to_le32((u32)addr);
2411         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2412
2413         mr->npages++;
2414
2415         return 0;
2416 }
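
/*
 * Worked example (illustrative, assuming a 4 KB PBL page): pbes_in_page is
 * 4096 / sizeof(u64) = 512, so page entry 513 (zero-based, mr->npages ==
 * 513 on entry) lands in pbl_table[1] at PBE index 1. The lo/hi halves are
 * stored little-endian regardless of host byte order.
 */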
2417
2418 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2419 {
2420         int work = info->completed - info->completed_handled - 1;
2421
2422         DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2423         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2424                 struct qedr_pbl *pbl;
2425
2426                 /* Free all page lists that can be freed (i.e. all those
2427                  * that were invalidated), under the assumption that if
2428                  * an FMR completed successfully, any invalidate posted
2429                  * before it has completed as well
2430                  */
2431                 pbl = list_first_entry(&info->inuse_pbl_list,
2432                                        struct qedr_pbl, list_entry);
2433                 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
2434                 info->completed_handled++;
2435         }
2436 }
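
/*
 * Note the "- 1" in the work calculation above: the page list tied to the
 * most recent completion is deliberately kept on the inuse list, and only
 * strictly older PBLs are recycled to the free list (presumably because
 * the newest one may still be referenced by the hardware).
 */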
2437
2438 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2439                    int sg_nents, unsigned int *sg_offset)
2440 {
2441         struct qedr_mr *mr = get_qedr_mr(ibmr);
2442
2443         mr->npages = 0;
2444
2445         handle_completed_mrs(mr->dev, &mr->info);
2446         return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2447 }
2448
2449 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2450 {
2451         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2452         struct qedr_pd *pd = get_qedr_pd(ibpd);
2453         struct qedr_mr *mr;
2454         int rc;
2455
2456         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2457         if (!mr)
2458                 return ERR_PTR(-ENOMEM);
2459
2460         mr->type = QEDR_MR_DMA;
2461
2462         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2463         if (rc) {
2464                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2465                 goto err1;
2466         }
2467
2468         /* index only, 18 bit long, lkey = itid << 8 | key */
2469         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2470         mr->hw_mr.pd = pd->pd_id;
2471         mr->hw_mr.local_read = 1;
2472         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2473         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2474         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2475         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2476         mr->hw_mr.dma_mr = true;
2477
2478         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2479         if (rc) {
2480                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2481                 goto err2;
2482         }
2483
2484         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2485         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2486             mr->hw_mr.remote_atomic)
2487                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2488
2489         DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2490         return &mr->ibmr;
2491
2492 err2:
2493         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2494 err1:
2495         kfree(mr);
2496         return ERR_PTR(rc);
2497 }
2498
2499 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2500 {
2501         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2502 }
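
/*
 * Example (illustrative): with max_wr == 4, prod == 3 and cons == 0 the
 * check reads ((3 + 1) % 4) == 0, i.e. full. One slot is always left
 * unused, the classic way of telling a full ring apart from an empty one
 * (prod == cons) without a separate element counter.
 */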
2503
2504 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2505 {
2506         int i, len = 0;
2507
2508         for (i = 0; i < num_sge; i++)
2509                 len += sg_list[i].length;
2510
2511         return len;
2512 }
2513
2514 static void swap_wqe_data64(u64 *p)
2515 {
2516         int i;
2517
2518         for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2519                 *p = cpu_to_be64(cpu_to_le64(*p));
2520 }
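
/*
 * cpu_to_be64(cpu_to_le64(x)) reduces to an unconditional byte swap: on a
 * little-endian host cpu_to_le64() is a no-op and cpu_to_be64() swaps, on
 * a big-endian host the roles are reversed. Either way every 64-bit word
 * of the segment is byte-reversed before being handed to the device.
 */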
2521
2522 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2523                                        struct qedr_qp *qp, u8 *wqe_size,
2524                                        struct ib_send_wr *wr,
2525                                        struct ib_send_wr **bad_wr, u8 *bits,
2526                                        u8 bit)
2527 {
2528         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2529         char *seg_prt, *wqe;
2530         int i, seg_siz;
2531
2532         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2533                 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2534                 *bad_wr = wr;
2535                 return 0;
2536         }
2537
2538         if (!data_size)
2539                 return data_size;
2540
2541         *bits |= bit;
2542
2543         seg_prt = NULL;
2544         wqe = NULL;
2545         seg_siz = 0;
2546
2547         /* Copy data inline */
2548         for (i = 0; i < wr->num_sge; i++) {
2549                 u32 len = wr->sg_list[i].length;
2550                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2551
2552                 while (len > 0) {
2553                         u32 cur;
2554
2555                         /* New segment required */
2556                         if (!seg_siz) {
2557                                 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2558                                 seg_prt = wqe;
2559                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
2560                                 (*wqe_size)++;
2561                         }
2562
2563                         /* Calculate currently allowed length */
2564                         cur = min_t(u32, len, seg_siz);
2565                         memcpy(seg_prt, src, cur);
2566
2567                         /* Update segment variables */
2568                         seg_prt += cur;
2569                         seg_siz -= cur;
2570
2571                         /* Update sge variables */
2572                         src += cur;
2573                         len -= cur;
2574
2575                         /* Swap fully-completed segments */
2576                         if (!seg_siz)
2577                                 swap_wqe_data64((u64 *)wqe);
2578                 }
2579         }
2580
2581         /* swap last not completed segment */
2582         if (seg_siz)
2583                 swap_wqe_data64((u64 *)wqe);
2584
2585         return data_size;
2586 }
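
/*
 * Walkthrough (illustrative): the payload is copied in segments of
 * sizeof(struct rdma_sq_common_wqe) bytes, each new segment consuming one
 * element from the SQ chain and bumping *wqe_size. A single SGE may span
 * several segments and a single segment may receive bytes from several
 * SGEs; every segment is byte-swapped with swap_wqe_data64() once it fills
 * up (or at the end, for a partially filled trailing segment).
 */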
2587
2588 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
2589         do {                                                    \
2590                 DMA_REGPAIR_LE(sge->addr, vaddr);               \
2591                 (sge)->length = cpu_to_le32(vlength);           \
2592                 (sge)->flags = cpu_to_le32(vflags);             \
2593         } while (0)
2594
2595 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
2596         do {                                                    \
2597                 DMA_REGPAIR_LE(hdr->wr_id, vwr_id);             \
2598                 (hdr)->num_sges = num_sge;                      \
2599         } while (0)
2600
2601 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
2602         do {                                                    \
2603                 DMA_REGPAIR_LE(sge->addr, vaddr);               \
2604                 (sge)->length = cpu_to_le32(vlength);           \
2605                 (sge)->l_key = cpu_to_le32(vlkey);              \
2606         } while (0)
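
/*
 * The do { ... } while (0) wrapper makes each multi-statement macro above
 * behave like a single statement, so an invocation such as
 *
 *	if (cond)
 *		RQ_SGE_SET(sge, addr, len, flags);
 *	else
 *		...
 *
 * expands without dangling-else or stray-semicolon surprises.
 */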
2607
2608 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2609                                 struct ib_send_wr *wr)
2610 {
2611         u32 data_size = 0;
2612         int i;
2613
2614         for (i = 0; i < wr->num_sge; i++) {
2615                 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2616
2617                 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2618                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2619                 sge->length = cpu_to_le32(wr->sg_list[i].length);
2620                 data_size += wr->sg_list[i].length;
2621         }
2622
2623         if (wqe_size)
2624                 *wqe_size += wr->num_sge;
2625
2626         return data_size;
2627 }
2628
2629 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2630                                      struct qedr_qp *qp,
2631                                      struct rdma_sq_rdma_wqe_1st *rwqe,
2632                                      struct rdma_sq_rdma_wqe_2nd *rwqe2,
2633                                      struct ib_send_wr *wr,
2634                                      struct ib_send_wr **bad_wr)
2635 {
2636         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2637         DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2638
2639         if (wr->send_flags & IB_SEND_INLINE &&
2640             (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2641              wr->opcode == IB_WR_RDMA_WRITE)) {
2642                 u8 flags = 0;
2643
2644                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2645                 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2646                                                    bad_wr, &rwqe->flags, flags);
2647         }
2648
2649         return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2650 }
2651
2652 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2653                                      struct qedr_qp *qp,
2654                                      struct rdma_sq_send_wqe_1st *swqe,
2655                                      struct rdma_sq_send_wqe_2st *swqe2,
2656                                      struct ib_send_wr *wr,
2657                                      struct ib_send_wr **bad_wr)
2658 {
2659         memset(swqe2, 0, sizeof(*swqe2));
2660         if (wr->send_flags & IB_SEND_INLINE) {
2661                 u8 flags = 0;
2662
2663                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2664                 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2665                                                    bad_wr, &swqe->flags, flags);
2666         }
2667
2668         return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2669 }
2670
2671 static int qedr_prepare_reg(struct qedr_qp *qp,
2672                             struct rdma_sq_fmr_wqe_1st *fwqe1,
2673                             struct ib_reg_wr *wr)
2674 {
2675         struct qedr_mr *mr = get_qedr_mr(wr->mr);
2676         struct rdma_sq_fmr_wqe_2nd *fwqe2;
2677
2678         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2679         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2680         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2681         fwqe1->l_key = wr->key;
2682
2683         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2684                    !!(wr->access & IB_ACCESS_REMOTE_READ));
2685         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2686                    !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2687         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2688                    !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2689         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2690         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2691                    !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2692         fwqe2->fmr_ctrl = 0;
2693
2694         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2695                    ilog2(mr->ibmr.page_size) - 12);
2696
2697         fwqe2->length_hi = 0;
2698         fwqe2->length_lo = mr->ibmr.length;
2699         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2700         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2701
2702         qp->wqe_wr_id[qp->sq.prod].mr = mr;
2703
2704         return 0;
2705 }
2706
2707 enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2708 {
2709         switch (opcode) {
2710         case IB_WR_RDMA_WRITE:
2711         case IB_WR_RDMA_WRITE_WITH_IMM:
2712                 return IB_WC_RDMA_WRITE;
2713         case IB_WR_SEND_WITH_IMM:
2714         case IB_WR_SEND:
2715         case IB_WR_SEND_WITH_INV:
2716                 return IB_WC_SEND;
2717         case IB_WR_RDMA_READ:
2718                 return IB_WC_RDMA_READ;
2719         case IB_WR_ATOMIC_CMP_AND_SWP:
2720                 return IB_WC_COMP_SWAP;
2721         case IB_WR_ATOMIC_FETCH_AND_ADD:
2722                 return IB_WC_FETCH_ADD;
2723         case IB_WR_REG_MR:
2724                 return IB_WC_REG_MR;
2725         case IB_WR_LOCAL_INV:
2726                 return IB_WC_LOCAL_INV;
2727         default:
2728                 return IB_WC_SEND;
2729         }
2730 }
2731
2732 inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2733 {
2734         int wq_is_full, err_wr, pbl_is_full;
2735         struct qedr_dev *dev = qp->dev;
2736
2737         /* prevent SQ overflow and/or processing of a bad WR */
2738         err_wr = wr->num_sge > qp->sq.max_sges;
2739         wq_is_full = qedr_wq_is_full(&qp->sq);
2740         pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2741                       QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2742         if (wq_is_full || err_wr || pbl_is_full) {
2743                 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2744                         DP_ERR(dev,
2745                                "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2746                                qp);
2747                         qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2748                 }
2749
2750                 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2751                         DP_ERR(dev,
2752                                "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2753                                qp);
2754                         qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2755                 }
2756
2757                 if (pbl_is_full &&
2758                     !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2759                         DP_ERR(dev,
2760                                "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2761                                qp);
2762                         qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2763                 }
2764                 return false;
2765         }
2766         return true;
2767 }
2768
2769 int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2770                      struct ib_send_wr **bad_wr)
2771 {
2772         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2773         struct qedr_qp *qp = get_qedr_qp(ibqp);
2774         struct rdma_sq_atomic_wqe_1st *awqe1;
2775         struct rdma_sq_atomic_wqe_2nd *awqe2;
2776         struct rdma_sq_atomic_wqe_3rd *awqe3;
2777         struct rdma_sq_send_wqe_2st *swqe2;
2778         struct rdma_sq_local_inv_wqe *iwqe;
2779         struct rdma_sq_rdma_wqe_2nd *rwqe2;
2780         struct rdma_sq_send_wqe_1st *swqe;
2781         struct rdma_sq_rdma_wqe_1st *rwqe;
2782         struct rdma_sq_fmr_wqe_1st *fwqe1;
2783         struct rdma_sq_common_wqe *wqe;
2784         u32 length;
2785         int rc = 0;
2786         bool comp;
2787
2788         if (!qedr_can_post_send(qp, wr)) {
2789                 *bad_wr = wr;
2790                 return -ENOMEM;
2791         }
2792
2793         wqe = qed_chain_produce(&qp->sq.pbl);
2794         qp->wqe_wr_id[qp->sq.prod].signaled =
2795                 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2796
2797         wqe->flags = 0;
2798         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2799                    !!(wr->send_flags & IB_SEND_SOLICITED));
2800         comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2801         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2802         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2803                    !!(wr->send_flags & IB_SEND_FENCE));
2804         wqe->prev_wqe_size = qp->prev_wqe_size;
2805
2806         qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2807
2808         switch (wr->opcode) {
2809         case IB_WR_SEND_WITH_IMM:
2810                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2811                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2812                 swqe->wqe_size = 2;
2813                 swqe2 = qed_chain_produce(&qp->sq.pbl);
2814
2815                 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2816                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2817                                                    wr, bad_wr);
2818                 swqe->length = cpu_to_le32(length);
2819                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2820                 qp->prev_wqe_size = swqe->wqe_size;
2821                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2822                 break;
2823         case IB_WR_SEND:
2824                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2825                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2826
2827                 swqe->wqe_size = 2;
2828                 swqe2 = qed_chain_produce(&qp->sq.pbl);
2829                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2830                                                    wr, bad_wr);
2831                 swqe->length = cpu_to_le32(length);
2832                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2833                 qp->prev_wqe_size = swqe->wqe_size;
2834                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2835                 break;
2836         case IB_WR_SEND_WITH_INV:
2837                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2838                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2839                 swqe2 = qed_chain_produce(&qp->sq.pbl);
2840                 swqe->wqe_size = 2;
2841                 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2842                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2843                                                    wr, bad_wr);
2844                 swqe->length = cpu_to_le32(length);
2845                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2846                 qp->prev_wqe_size = swqe->wqe_size;
2847                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2848                 break;
2849
2850         case IB_WR_RDMA_WRITE_WITH_IMM:
2851                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2852                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2853
2854                 rwqe->wqe_size = 2;
2855                 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2856                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2857                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2858                                                    wr, bad_wr);
2859                 rwqe->length = cpu_to_le32(length);
2860                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2861                 qp->prev_wqe_size = rwqe->wqe_size;
2862                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2863                 break;
2864         case IB_WR_RDMA_WRITE:
2865                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2866                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2867
2868                 rwqe->wqe_size = 2;
2869                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2870                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2871                                                    wr, bad_wr);
2872                 rwqe->length = cpu_to_le32(length);
2873                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2874                 qp->prev_wqe_size = rwqe->wqe_size;
2875                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2876                 break;
2877         case IB_WR_RDMA_READ_WITH_INV:
2878                 DP_ERR(dev,
2879                        "RDMA READ WITH INVALIDATE not supported\n");
2880                 *bad_wr = wr;
2881                 rc = -EINVAL;
2882                 break;
2883
2884         case IB_WR_RDMA_READ:
2885                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2886                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2887
2888                 rwqe->wqe_size = 2;
2889                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2890                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2891                                                    wr, bad_wr);
2892                 rwqe->length = cpu_to_le32(length);
2893                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2894                 qp->prev_wqe_size = rwqe->wqe_size;
2895                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2896                 break;
2897
2898         case IB_WR_ATOMIC_CMP_AND_SWP:
2899         case IB_WR_ATOMIC_FETCH_AND_ADD:
2900                 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2901                 awqe1->wqe_size = 4;
2902
2903                 awqe2 = qed_chain_produce(&qp->sq.pbl);
2904                 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2905                 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2906
2907                 awqe3 = qed_chain_produce(&qp->sq.pbl);
2908
2909                 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2910                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2911                         DMA_REGPAIR_LE(awqe3->swap_data,
2912                                        atomic_wr(wr)->compare_add);
2913                 } else {
2914                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2915                         DMA_REGPAIR_LE(awqe3->swap_data,
2916                                        atomic_wr(wr)->swap);
2917                         DMA_REGPAIR_LE(awqe3->cmp_data,
2918                                        atomic_wr(wr)->compare_add);
2919                 }
2920
2921                 qedr_prepare_sq_sges(qp, NULL, wr);
2922
2923                 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2924                 qp->prev_wqe_size = awqe1->wqe_size;
2925                 break;
2926
2927         case IB_WR_LOCAL_INV:
2928                 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2929                 iwqe->wqe_size = 1;
2930
2931                 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2932                 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2933                 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2934                 qp->prev_wqe_size = iwqe->wqe_size;
2935                 break;
2936         case IB_WR_REG_MR:
2937                 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2938                 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2939                 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2940                 fwqe1->wqe_size = 2;
2941
2942                 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2943                 if (rc) {
2944                         DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2945                         *bad_wr = wr;
2946                         break;
2947                 }
2948
2949                 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2950                 qp->prev_wqe_size = fwqe1->wqe_size;
2951                 break;
2952         default:
2953                 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2954                 rc = -EINVAL;
2955                 *bad_wr = wr;
2956                 break;
2957         }
2958
2959         if (*bad_wr) {
2960                 u16 value;
2961
2962                 /* Restore prod to its position before
2963                  * this WR was processed
2964                  */
2965                 value = le16_to_cpu(qp->sq.db_data.data.value);
2966                 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2967
2968                 /* Restore prev_wqe_size */
2969                 qp->prev_wqe_size = wqe->prev_wqe_size;
2970                 rc = -EINVAL;
2971                 DP_ERR(dev, "POST SEND FAILED\n");
2972         }
2973
2974         return rc;
2975 }
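
/*
 * Illustrative sketch only (not part of the driver): how a hypothetical
 * kernel ULP might build the atomic fetch-and-add WR that the
 * IB_WR_ATOMIC_FETCH_AND_ADD case above consumes. The QP, rkey and the
 * DMA-mapped 8-byte local buffer are assumed to exist already; all names
 * here are made up for illustration.
 */
static void example_build_fetch_add(struct ib_atomic_wr *awr,
                                    struct ib_sge *sge,
                                    u64 remote_addr, u32 rkey, u64 add)
{
        memset(awr, 0, sizeof(*awr));
        awr->wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
        awr->wr.send_flags = IB_SEND_SIGNALED;
        awr->wr.sg_list = sge;          /* 8-byte buffer for the old value */
        awr->wr.num_sge = 1;
        awr->remote_addr = remote_addr; /* becomes awqe2->remote_va above */
        awr->rkey = rkey;               /* becomes awqe2->r_key above */
        awr->compare_add = add;         /* becomes awqe3->swap_data above */
}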
2976
2977 int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2978                    struct ib_send_wr **bad_wr)
2979 {
2980         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2981         struct qedr_qp *qp = get_qedr_qp(ibqp);
2982         unsigned long flags;
2983         int rc = 0;
2984
2985         *bad_wr = NULL;
2986
2987         if (qp->qp_type == IB_QPT_GSI)
2988                 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2989
2990         spin_lock_irqsave(&qp->q_lock, flags);
2991
2992         if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
2993             (qp->state != QED_ROCE_QP_STATE_ERR) &&
2994             (qp->state != QED_ROCE_QP_STATE_SQD)) {
2995                 spin_unlock_irqrestore(&qp->q_lock, flags);
2996                 *bad_wr = wr;
2997                 DP_DEBUG(dev, QEDR_MSG_CQ,
2998                          "QP in wrong state! QP icid=0x%x state %d\n",
2999                          qp->icid, qp->state);
3000                 return -EINVAL;
3001         }
3002
3003         while (wr) {
3004                 rc = __qedr_post_send(ibqp, wr, bad_wr);
3005                 if (rc)
3006                         break;
3007
3008                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3009
3010                 qedr_inc_sw_prod(&qp->sq);
3011
3012                 qp->sq.db_data.data.value++;
3013
3014                 wr = wr->next;
3015         }
3016
3017         /* Trigger doorbell
3018          * If there was a failure in the first WR then it will be triggered in
3019          * vain. However, this is not harmful (as long as the producer value is
3020          * unchanged). For performance reasons we avoid checking for this
3021          * redundant doorbell.
3022          */
3023         wmb();
3024         writel(qp->sq.db_data.raw, qp->sq.db);
3025
3026         /* Make sure write sticks */
3027         mmiowb();
3028
3029         spin_unlock_irqrestore(&qp->q_lock, flags);
3030
3031         return rc;
3032 }
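
/*
 * Illustrative sketch only (hypothetical caller, not driver code): a
 * minimal signaled SEND posted through the path above via the core
 * ib_post_send() wrapper. The DMA address and lkey are assumed to come
 * from an already registered, DMA-mapped buffer.
 */
static int example_post_signaled_send(struct ib_qp *qp, u64 dma_addr,
                                      u32 lkey, u32 len)
{
        struct ib_send_wr wr, *bad_wr = NULL;
        struct ib_sge sge;

        sge.addr = dma_addr;
        sge.length = len;
        sge.lkey = lkey;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = 1;                   /* echoed back in the completion */
        wr.opcode = IB_WR_SEND;         /* maps to RDMA_SQ_REQ_TYPE_SEND */
        wr.send_flags = IB_SEND_SIGNALED;
        wr.sg_list = &sge;
        wr.num_sge = 1;

        /* On failure, bad_wr points at the first WR that was not posted. */
        return ib_post_send(qp, &wr, &bad_wr);
}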
3033
3034 int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3035                    struct ib_recv_wr **bad_wr)
3036 {
3037         struct qedr_qp *qp = get_qedr_qp(ibqp);
3038         struct qedr_dev *dev = qp->dev;
3039         unsigned long flags;
3040         int status = 0;
3041
3042         if (qp->qp_type == IB_QPT_GSI)
3043                 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3044
3045         spin_lock_irqsave(&qp->q_lock, flags);
3046
3047         if (qp->state == QED_ROCE_QP_STATE_RESET) {
3048                 spin_unlock_irqrestore(&qp->q_lock, flags);
3049                 *bad_wr = wr;
3050                 return -EINVAL;
3051         }
3052
3053         while (wr) {
3054                 int i;
3055
3056                 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3057                     QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3058                     wr->num_sge > qp->rq.max_sges) {
3059                         DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3060                                qed_chain_get_elem_left_u32(&qp->rq.pbl),
3061                                QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3062                                qp->rq.max_sges);
3063                         status = -ENOMEM;
3064                         *bad_wr = wr;
3065                         break;
3066                 }
3067                 for (i = 0; i < wr->num_sge; i++) {
3068                         u32 flags = 0;
3069                         struct rdma_rq_sge *rqe =
3070                             qed_chain_produce(&qp->rq.pbl);
3071
3072                         /* First one must include the number
3073                          * of SGEs in the list
3074                          */
3075                         if (!i)
3076                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3077                                           wr->num_sge);
3078
3079                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3080                                   wr->sg_list[i].lkey);
3081
3082                         RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3083                                    wr->sg_list[i].length, flags);
3084                 }
3085
3086                 /* Special case of no SGEs. FW requires between 1 and 4 SGEs;
3087                  * in this case we need to post one SGE with length zero,
3088                  * because an RDMA write with immediate consumes an RQ entry.
3089                  */
3090                 if (!wr->num_sge) {
3091                         u32 flags = 0;
3092                         struct rdma_rq_sge *rqe =
3093                             qed_chain_produce(&qp->rq.pbl);
3094
3095                         /* First one must include the number
3096                          * of SGEs in the list
3097                          */
3098                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
3099                         SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3100
3101                         RQ_SGE_SET(rqe, 0, 0, flags);
3102                         i = 1;
3103                 }
3104
3105                 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3106                 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3107
3108                 qedr_inc_sw_prod(&qp->rq);
3109
3110                 /* Flush all the writes before signalling doorbell */
3111                 wmb();
3112
3113                 qp->rq.db_data.data.value++;
3114
3115                 writel(qp->rq.db_data.raw, qp->rq.db);
3116
3117                 /* Make sure write sticks */
3118                 mmiowb();
3119
3120                 wr = wr->next;
3121         }
3122
3123         spin_unlock_irqrestore(&qp->q_lock, flags);
3124
3125         return status;
3126 }
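
/*
 * Illustrative sketch only (hypothetical caller): replenishing the RQ
 * that qedr_post_recv() above fills, using the core ib_post_recv()
 * wrapper. The buffer address and lkey are assumed to be valid.
 */
static int example_post_recv_buffer(struct ib_qp *qp, u64 dma_addr,
                                    u32 lkey, u32 len)
{
        struct ib_recv_wr wr, *bad_wr = NULL;
        struct ib_sge sge;

        sge.addr = dma_addr;
        sge.length = len;
        sge.lkey = lkey;

        memset(&wr, 0, sizeof(wr));
        wr.wr_id = dma_addr;    /* identifies the buffer on completion */
        wr.sg_list = &sge;
        wr.num_sge = 1;

        return ib_post_recv(qp, &wr, &bad_wr);
}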
3127
3128 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3129 {
3130         struct rdma_cqe_requester *resp_cqe = &cqe->req;
3131
3132         return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3133                 cq->pbl_toggle;
3134 }
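
/*
 * Illustrative sketch (hypothetical standalone ring, not the qed chain
 * API) of the toggle-bit scheme behind is_valid_cqe() above: the
 * producer stamps each entry with a phase bit that it inverts on every
 * wrap of the ring, so the consumer can detect new entries without
 * sharing an index with the producer.
 */
#define EXAMPLE_RING_SIZE 16

struct example_ring {
        u8 flags[EXAMPLE_RING_SIZE];    /* per-entry flags; bit 0 = phase */
        u32 cons;                       /* consumer index */
        u8 phase;                       /* phase expected this lap; starts
                                         * at 1 since entries are zeroed
                                         */
};

static bool example_entry_ready(const struct example_ring *r)
{
        return (r->flags[r->cons % EXAMPLE_RING_SIZE] & 1) == r->phase;
}

static void example_consume(struct example_ring *r)
{
        if (++r->cons % EXAMPLE_RING_SIZE == 0)
                r->phase ^= 1;  /* expect the opposite phase next lap */
}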
3135
3136 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3137 {
3138         struct rdma_cqe_requester *resp_cqe = &cqe->req;
3139         struct qedr_qp *qp;
3140
3141         qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3142                                                    resp_cqe->qp_handle.lo,
3143                                                    u64);
3144         return qp;
3145 }
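
/*
 * Sketch of the hi/lo recombination HILO_GEN() performs above: the CQE
 * carries the 64-bit QP pointer as two 32-bit halves. Hypothetical
 * helper for illustration only, not the qed macro itself.
 */
static u64 example_hilo_to_u64(u32 hi, u32 lo)
{
        return ((u64)hi << 32) | lo;
}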
3146
3147 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3148 {
3149         struct rdma_cqe_requester *resp_cqe = &cqe->req;
3150
3151         return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3152 }
3153
3154 /* Return latest CQE (needs processing) */
3155 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3156 {
3157         return cq->latest_cqe;
3158 }
3159
3160 /* For FMR we need to increase the count of completed FMR operations,
3161  * used by the FMR algorithm to determine whether a PBL can be freed.
3162  * We need to perform this whether the work request was signaled or not.
3163  * For this purpose we call this function from the condition that checks
3164  * if a WR should be skipped, to make sure we don't miss it (possibly
3165  * this FMR operation was not signaled).
3166  */
3167 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3168 {
3169         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3170                 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3171 }
3172
3173 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3174                        struct qedr_cq *cq, int num_entries,
3175                        struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3176                        int force)
3177 {
3178         u16 cnt = 0;
3179
3180         while (num_entries && qp->sq.wqe_cons != hw_cons) {
3181                 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3182                         qedr_chk_if_fmr(qp);
3183                         /* skip WC */
3184                         goto next_cqe;
3185                 }
3186
3187                 /* fill WC */
3188                 wc->status = status;
3189                 wc->vendor_err = 0;
3190                 wc->wc_flags = 0;
3191                 wc->src_qp = qp->id;
3192                 wc->qp = &qp->ibqp;
3193
3194                 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3195                 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3196
3197                 switch (wc->opcode) {
3198                 case IB_WC_RDMA_WRITE:
3199                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3200                         break;
3201                 case IB_WC_COMP_SWAP:
3202                 case IB_WC_FETCH_ADD:
3203                         wc->byte_len = 8;
3204                         break;
3205                 case IB_WC_REG_MR:
3206                         qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3207                         break;
3208                 default:
3209                         break;
3210                 }
3211
3212                 num_entries--;
3213                 wc++;
3214                 cnt++;
3215 next_cqe:
3216                 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3217                         qed_chain_consume(&qp->sq.pbl);
3218                 qedr_inc_sw_cons(&qp->sq);
3219         }
3220
3221         return cnt;
3222 }
3223
3224 static int qedr_poll_cq_req(struct qedr_dev *dev,
3225                             struct qedr_qp *qp, struct qedr_cq *cq,
3226                             int num_entries, struct ib_wc *wc,
3227                             struct rdma_cqe_requester *req)
3228 {
3229         int cnt = 0;
3230
3231         switch (req->status) {
3232         case RDMA_CQE_REQ_STS_OK:
3233                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3234                                   IB_WC_SUCCESS, 0);
3235                 break;
3236         case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3237                 if (qp->state != QED_ROCE_QP_STATE_ERR)
3238                         DP_ERR(dev,
3239                                "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3240                                cq->icid, qp->icid);
3241                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3242                                   IB_WC_WR_FLUSH_ERR, 1);
3243                 break;
3244         default:
3245                 /* process all WQEs before the consumer */
3246                 qp->state = QED_ROCE_QP_STATE_ERR;
3247                 cnt = process_req(dev, qp, cq, num_entries, wc,
3248                                   req->sq_cons - 1, IB_WC_SUCCESS, 0);
3249                 wc += cnt;
3250                 /* if we have extra WC fill it with actual error info */
3251                 if (cnt < num_entries) {
3252                         enum ib_wc_status wc_status;
3253
3254                         switch (req->status) {
3255                         case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
3256                                 DP_ERR(dev,
3257                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3258                                        cq->icid, qp->icid);
3259                                 wc_status = IB_WC_BAD_RESP_ERR;
3260                                 break;
3261                         case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
3262                                 DP_ERR(dev,
3263                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3264                                        cq->icid, qp->icid);
3265                                 wc_status = IB_WC_LOC_LEN_ERR;
3266                                 break;
3267                         case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
3268                                 DP_ERR(dev,
3269                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3270                                        cq->icid, qp->icid);
3271                                 wc_status = IB_WC_LOC_QP_OP_ERR;
3272                                 break;
3273                         case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
3274                                 DP_ERR(dev,
3275                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3276                                        cq->icid, qp->icid);
3277                                 wc_status = IB_WC_LOC_PROT_ERR;
3278                                 break;
3279                         case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
3280                                 DP_ERR(dev,
3281                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3282                                        cq->icid, qp->icid);
3283                                 wc_status = IB_WC_MW_BIND_ERR;
3284                                 break;
3285                         case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
3286                                 DP_ERR(dev,
3287                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3288                                        cq->icid, qp->icid);
3289                                 wc_status = IB_WC_REM_INV_REQ_ERR;
3290                                 break;
3291                         case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
3292                                 DP_ERR(dev,
3293                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3294                                        cq->icid, qp->icid);
3295                                 wc_status = IB_WC_REM_ACCESS_ERR;
3296                                 break;
3297                         case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
3298                                 DP_ERR(dev,
3299                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3300                                        cq->icid, qp->icid);
3301                                 wc_status = IB_WC_REM_OP_ERR;
3302                                 break;
3303                         case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
3304                                 DP_ERR(dev,
3305                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3306                                        cq->icid, qp->icid);
3307                                 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
3308                                 break;
3309                         case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
3310                                 DP_ERR(dev,
3311                                        "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3312                                        cq->icid, qp->icid);
3313                                 wc_status = IB_WC_RETRY_EXC_ERR;
3314                                 break;
3315                         default:
3316                                 DP_ERR(dev,
3317                                        "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3318                                        cq->icid, qp->icid);
3319                                 wc_status = IB_WC_GENERAL_ERR;
3320                         }
3321                         cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
3322                                            wc_status, 1);
3323                 }
3324         }
3325
3326         return cnt;
3327 }
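
/*
 * Sketch: the long switch in qedr_poll_cq_req() above is a one-to-one
 * status mapping; a hypothetical table-driven form is shown below for
 * comparison. Unlisted statuses would still need the IB_WC_GENERAL_ERR
 * fallback, and the driver keeps the switch so each case can emit its
 * own DP_ERR. Purely illustrative.
 */
static const enum ib_wc_status example_req_sts_to_wc[] = {
        [RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR]           = IB_WC_BAD_RESP_ERR,
        [RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR]           = IB_WC_LOC_LEN_ERR,
        [RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR]     = IB_WC_LOC_QP_OP_ERR,
        [RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR]       = IB_WC_LOC_PROT_ERR,
        [RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR]   = IB_WC_MW_BIND_ERR,
        [RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR] = IB_WC_REM_INV_REQ_ERR,
        [RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR]          = IB_WC_REM_ACCESS_ERR,
        [RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR]       = IB_WC_REM_OP_ERR,
        [RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR]      = IB_WC_RNR_RETRY_EXC_ERR,
        [RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR]    = IB_WC_RETRY_EXC_ERR,
};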
3328
3329 static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3330                                struct qedr_cq *cq, struct ib_wc *wc,
3331                                struct rdma_cqe_responder *resp, u64 wr_id)
3332 {
3333         enum ib_wc_status wc_status = IB_WC_SUCCESS;
3334         u8 flags;
3335
3336         wc->opcode = IB_WC_RECV;
3337         wc->wc_flags = 0;
3338
3339         switch (resp->status) {
3340         case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
3341                 wc_status = IB_WC_LOC_ACCESS_ERR;
3342                 break;
3343         case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
3344                 wc_status = IB_WC_LOC_LEN_ERR;
3345                 break;
3346         case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
3347                 wc_status = IB_WC_LOC_QP_OP_ERR;
3348                 break;
3349         case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
3350                 wc_status = IB_WC_LOC_PROT_ERR;
3351                 break;
3352         case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
3353                 wc_status = IB_WC_MW_BIND_ERR;
3354                 break;
3355         case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
3356                 wc_status = IB_WC_REM_INV_RD_REQ_ERR;
3357                 break;
3358         case RDMA_CQE_RESP_STS_OK:
3359                 wc_status = IB_WC_SUCCESS;
3360                 wc->byte_len = le32_to_cpu(resp->length);
3361
3362                 flags = resp->flags & QEDR_RESP_RDMA_IMM;
3363
3364                 if (flags == QEDR_RESP_RDMA_IMM)
3365                         wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3366
3367                 if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
3368                         wc->ex.imm_data =
3369                                 le32_to_cpu(resp->imm_data_or_inv_r_Key);
3370                         wc->wc_flags |= IB_WC_WITH_IMM;
3371                 }
3372                 break;
3373         default:
3374                 wc->status = IB_WC_GENERAL_ERR;
3375                 DP_ERR(dev, "Invalid CQE status detected\n");
3376         }
3377
3378         /* fill WC */
3379         wc->status = wc_status;
3380         wc->vendor_err = 0;
3381         wc->src_qp = qp->id;
3382         wc->qp = &qp->ibqp;
3383         wc->wr_id = wr_id;
3384 }
3385
3386 static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
3387                             struct qedr_cq *cq, struct ib_wc *wc,
3388                             struct rdma_cqe_responder *resp)
3389 {
3390         u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3391
3392         __process_resp_one(dev, qp, cq, wc, resp, wr_id);
3393
3394         while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3395                 qed_chain_consume(&qp->rq.pbl);
3396         qedr_inc_sw_cons(&qp->rq);
3397
3398         return 1;
3399 }
3400
3401 static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
3402                               int num_entries, struct ib_wc *wc, u16 hw_cons)
3403 {
3404         u16 cnt = 0;
3405
3406         while (num_entries && qp->rq.wqe_cons != hw_cons) {
3407                 /* fill WC */
3408                 wc->status = IB_WC_WR_FLUSH_ERR;
3409                 wc->vendor_err = 0;
3410                 wc->wc_flags = 0;
3411                 wc->src_qp = qp->id;
3412                 wc->byte_len = 0;
3413                 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
3414                 wc->qp = &qp->ibqp;
3415                 num_entries--;
3416                 wc++;
3417                 cnt++;
3418                 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
3419                         qed_chain_consume(&qp->rq.pbl);
3420                 qedr_inc_sw_cons(&qp->rq);
3421         }
3422
3423         return cnt;
3424 }
3425
3426 static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3427                                  struct rdma_cqe_responder *resp, int *update)
3428 {
3429         if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
3430                 consume_cqe(cq);
3431                 *update |= 1;
3432         }
3433 }
3434
3435 static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
3436                              struct qedr_cq *cq, int num_entries,
3437                              struct ib_wc *wc, struct rdma_cqe_responder *resp,
3438                              int *update)
3439 {
3440         int cnt;
3441
3442         if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
3443                 cnt = process_resp_flush(qp, cq, num_entries, wc,
3444                                          resp->rq_cons);
3445                 try_consume_resp_cqe(cq, qp, resp, update);
3446         } else {
3447                 cnt = process_resp_one(dev, qp, cq, wc, resp);
3448                 consume_cqe(cq);
3449                 *update |= 1;
3450         }
3451
3452         return cnt;
3453 }
3454
3455 static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
3456                                 struct rdma_cqe_requester *req, int *update)
3457 {
3458         if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
3459                 consume_cqe(cq);
3460                 *update |= 1;
3461         }
3462 }
3463
3464 int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3465 {
3466         struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3467         struct qedr_cq *cq = get_qedr_cq(ibcq);
3468         union rdma_cqe *cqe = cq->latest_cqe;
3469         u32 old_cons, new_cons;
3470         unsigned long flags;
3471         int update = 0;
3472         int done = 0;
3473
3474         if (cq->cq_type == QEDR_CQ_TYPE_GSI)
3475                 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3476
3477         spin_lock_irqsave(&cq->cq_lock, flags);
3478         old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3479         while (num_entries && is_valid_cqe(cq, cqe)) {
3480                 struct qedr_qp *qp;
3481                 int cnt = 0;
3482
3483                 /* prevent speculative reads of any field of CQE */
3484                 rmb();
3485
3486                 qp = cqe_get_qp(cqe);
3487                 if (!qp) {
3488                         WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
3489                         break;
3490                 }
3491
3492                 wc->qp = &qp->ibqp;
3493
3494                 switch (cqe_get_type(cqe)) {
3495                 case RDMA_CQE_TYPE_REQUESTER:
3496                         cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
3497                                                &cqe->req);
3498                         try_consume_req_cqe(cq, qp, &cqe->req, &update);
3499                         break;
3500                 case RDMA_CQE_TYPE_RESPONDER_RQ:
3501                         cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
3502                                                 &cqe->resp, &update);
3503                         break;
3504                 case RDMA_CQE_TYPE_INVALID:
3505                 default:
3506                         DP_ERR(dev, "Error: invalid CQE type = %d\n",
3507                                cqe_get_type(cqe));
3508                 }
3509                 num_entries -= cnt;
3510                 wc += cnt;
3511                 done += cnt;
3512
3513                 cqe = get_cqe(cq);
3514         }
3515         new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3516
3517         cq->cq_cons += new_cons - old_cons;
3518
3519         if (update)
3520                 /* The doorbell notifies about the latest VALID entry,
3521                  * but the chain already points to the next INVALID one.
3522                  */
3523                 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
3524
3525         spin_unlock_irqrestore(&cq->cq_lock, flags);
3526         return done;
3527 }
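
/*
 * Illustrative sketch only (hypothetical caller): a typical kernel ULP
 * loop draining qedr_poll_cq() above through the core ib_poll_cq()
 * wrapper, a small batch of completions at a time.
 */
static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc[8];
        int n, i;

        while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
                for (i = 0; i < n; i++) {
                        if (wc[i].status != IB_WC_SUCCESS)
                                pr_warn("wr_id %llu failed: %d\n",
                                        (unsigned long long)wc[i].wr_id,
                                        wc[i].status);
                }
        }
}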
3528
3529 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
3530                      u8 port_num,
3531                      const struct ib_wc *in_wc,
3532                      const struct ib_grh *in_grh,
3533                      const struct ib_mad_hdr *mad_hdr,
3534                      size_t in_mad_size, struct ib_mad_hdr *out_mad,
3535                      size_t *out_mad_size, u16 *out_mad_pkey_index)
3536 {
3537         struct qedr_dev *dev = get_qedr_dev(ibdev);
3538
3539         DP_DEBUG(dev, QEDR_MSG_GSI,
3540                  "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
3541                  mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
3542                  mad_hdr->class_specific, mad_hdr->class_version,
3543                  mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
3544         return IB_MAD_RESULT_SUCCESS;
3545 }
3546
3547 int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
3548                         struct ib_port_immutable *immutable)
3549 {
3550         struct ib_port_attr attr;
3551         int err;
3552
3553         err = qedr_query_port(ibdev, port_num, &attr);
3554         if (err)
3555                 return err;
3556
3557         immutable->pkey_tbl_len = attr.pkey_tbl_len;
3558         immutable->gid_tbl_len = attr.gid_tbl_len;
3559         immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
3560                                     RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3561         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3562
3563         return 0;
3564 }